1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 
10 #include "spectrum.h"
11 #include "core.h"
12 #include "port.h"
13 #include "reg.h"
14 
/* Cached configuration of one shared buffer pool (SBPR register):
 * threshold mode and pool size. Size is stored in cells (see
 * mlxsw_sp_sb_pr_write(), which converts before caching).
 */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};
19 
/* Occupancy sample: current usage and maximum (watermark) as unpacked
 * from the SBPM/SBSR registers.
 * NOTE(review): the "cp" in the tag looks like a typo of "sp"; renaming
 * would require touching every user, so it is left as-is here.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
24 
/* Cached per-{port, TC} quota (SBCM register): minimum/maximum buffer,
 * the pool the binding points at, and the last occupancy sample.
 */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
	struct mlxsw_cp_sb_occ occ;
};

/* Marker for an "infinite" size/quota; translated to the register's
 * infi_size/infi_max bit by the write helpers below.
 */
#define MLXSW_SP_SB_INFI -1U
33 
/* Cached per-{port, pool} quota (SBPM register) plus the last
 * occupancy sample read back from hardware.
 */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
39 
/* Per-priority multicast quota (SBMM register): min/max buffer and the
 * pool the quota draws from.
 */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};
45 
/* Descriptor mapping a flat driver pool index to the hardware's
 * (direction, pool number) pair used by the SBxx registers.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};
50 
/* Order ingress pools before egress pools. mlxsw_sp_pool_count() relies
 * on this ordering to split the flat index space into ingress/egress
 * counts.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};
63 
/* Spectrum-2 pool descriptors; same layout as Spectrum-1 — ingress
 * pools must come before egress pools (see mlxsw_sp_pool_count()).
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};
75 
/* Number of traffic classes per direction; ingress has 8 PGs, egress
 * has 16 TCs (see mlxsw_sp_sb_cm_exists()).
 */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared buffer state: per-TC quotas for both directions and a
 * dynamically sized per-pool quota array (allocated in
 * mlxsw_sp_sb_port_init()).
 */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;
};
84 
/* Top-level shared buffer state: pool records, per-port state, and
 * device constants read from firmware resources in
 * mlxsw_sp_buffers_init() (cell size, headroom limit in cells, total
 * buffer size in bytes).
 */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
	u32 max_headroom_cells;
	u64 sb_size;
};
92 
/* Per-ASIC shared buffer value tables. One instance exists per
 * generation (mlxsw_sp1_sb_vals / mlxsw_sp2_sb_vals below); the init
 * code only dereferences mlxsw_sp->sb_vals.
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
107 
108 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
109 {
110 	return mlxsw_sp->sb->cell_size * cells;
111 }
112 
113 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
114 {
115 	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
116 }
117 
118 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
119 {
120 	return mlxsw_sp->sb->max_headroom_cells;
121 }
122 
123 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
124 						 u16 pool_index)
125 {
126 	return &mlxsw_sp->sb->prs[pool_index];
127 }
128 
129 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
130 {
131 	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
132 		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
133 	else
134 		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
135 }
136 
137 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
138 						 u8 local_port, u8 pg_buff,
139 						 enum mlxsw_reg_sbxx_dir dir)
140 {
141 	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
142 
143 	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
144 	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
145 		return &sb_port->ing_cms[pg_buff];
146 	else
147 		return &sb_port->eg_cms[pg_buff];
148 }
149 
150 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
151 						 u8 local_port, u16 pool_index)
152 {
153 	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
154 }
155 
/* Program pool pool_index via SBPR and cache the result.
 *
 * @size is in cells unless @infi_size is set, in which case hardware is
 * told to use an infinite size and the cache records the whole shared
 * buffer converted to cells. Returns 0 or a register-write errno; the
 * cache is only updated on success.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* For an infinite pool, cache the full buffer size in cells. */
	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
179 
/* Program a per-{port, TC} quota via SBCM and cache it.
 *
 * @infi_max requests an infinite maximum; the cache then records the
 * full buffer size in cells. The cache is skipped for PG/TC values that
 * do not exist in the given direction (the caller may still program
 * them, e.g. the dummy ingress PG 8).
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
209 
/* Program a per-{port, pool} quota via SBPM and cache min/max on
 * success. Returns 0 or a register-write errno.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
230 
/* Queue an SBPM transaction that clears the max-occupancy watermark for
 * a {port, pool} (clr bit set); completion is collected via @bulk_list.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
243 
/* Transaction completion callback: unpack the SBPM occupancy values
 * into the cached pm record passed through @cb_priv.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
252 
/* Queue an SBPM occupancy read for a {port, pool}; the result lands in
 * the cached pm record via mlxsw_sp_sb_pm_occ_query_cb() once the
 * bulk list is processed.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
269 
270 /* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
271 #define MLXSW_SP_PB_HEADROOM 25632
272 #define MLXSW_SP_PB_UNUSED 8
273 
274 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
275 {
276 	const u32 pbs[] = {
277 		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
278 		[9] = 2 * MLXSW_PORT_MAX_MTU,
279 	};
280 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
281 	char pbmc_pl[MLXSW_REG_PBMC_LEN];
282 	int i;
283 
284 	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
285 			    0xffff, 0xffff / 2);
286 	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
287 		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
288 
289 		if (i == MLXSW_SP_PB_UNUSED)
290 			continue;
291 		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
292 	}
293 	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
294 					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
295 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
296 }
297 
298 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
299 {
300 	char pptb_pl[MLXSW_REG_PPTB_LEN];
301 	int i;
302 
303 	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
304 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
305 		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
306 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
307 			       pptb_pl);
308 }
309 
/* Initialize port headroom: buffer sizes first, then the priority to
 * buffer mapping.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (!err)
		err = mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
	return err;
}
319 
320 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
321 				 struct mlxsw_sp_sb_port *sb_port)
322 {
323 	struct mlxsw_sp_sb_pm *pms;
324 
325 	pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
326 		      GFP_KERNEL);
327 	if (!pms)
328 		return -ENOMEM;
329 	sb_port->pms = pms;
330 	return 0;
331 }
332 
/* Free the per-pool quota array of one port. */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}
337 
/* Allocate the per-port array, the pool record array, and each port's
 * per-pool quota array. On failure, unwinds in reverse order: already
 * initialized ports are finalized, then prs and ports are freed.
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	/* Only unwind ports that were successfully initialized. */
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
375 
376 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
377 {
378 	int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
379 	int i;
380 
381 	for (i = max_ports - 1; i >= 0; i--)
382 		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
383 	kfree(mlxsw_sp->sb->prs);
384 	kfree(mlxsw_sp->sb->ports);
385 }
386 
/* Initializer for a pool record entry (mode + size in bytes; converted
 * to cells by mlxsw_sp_sb_prs_init()).
 */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

#define MLXSW_SP1_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP1_SB_PR_EGRESS_SIZE	13232000

/* Spectrum-1 pool sizes; entries are indexed by the flat pool index
 * defined by mlxsw_sp1_sb_pool_dess (4 ingress, then 5 egress).
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};
413 
#define MLXSW_SP2_SB_PR_INGRESS_SIZE	40960000
#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP2_SB_PR_EGRESS_SIZE	40960000

/* Spectrum-2 pool sizes; same flat index layout as Spectrum-1 (see
 * mlxsw_sp2_sb_pool_dess).
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};
434 
435 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
436 				const struct mlxsw_sp_sb_pr *prs,
437 				size_t prs_len)
438 {
439 	int i;
440 	int err;
441 
442 	for (i = 0; i < prs_len; i++) {
443 		u32 size = prs[i].size;
444 		u32 size_cells;
445 
446 		if (size == MLXSW_SP_SB_INFI) {
447 			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
448 						   0, true);
449 		} else {
450 			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
451 			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
452 						   size_cells, false);
453 		}
454 		if (err)
455 			return err;
456 	}
457 	return 0;
458 }
459 
/* Initializer for a per-{port, TC} quota entry. min_buff is in bytes
 * (converted to cells at init time); max_buff is in bytes for static
 * pools, or a dynamic-threshold alpha index otherwise (see
 * __mlxsw_sp_sb_cms_init()).
 */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Spectrum-1 ingress quotas, indexed by PG; PG 8 does not exist and is
 * skipped by __mlxsw_sp_sb_cms_init().
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};
479 
/* Spectrum-2 ingress quotas; same PG indexing as Spectrum-1, including
 * the nonexistent PG 8 dummy entry.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(0, 7, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};
492 
/* Spectrum-1 egress quotas, indexed by TC: 8 unicast TCs, 8 multicast
 * TCs bound to the infinite pool (index 8), and a final control entry.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(1, 0xff, 4),
};
512 
/* Spectrum-2 egress quotas; same TC layout as Spectrum-1. */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(1, 0xff, 4),
};
532 
/* Default (zero) CPU port quota bound to egress pool index 4. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)

/* CPU port egress quotas; a handful of TCs get room for a maximal MTU,
 * the rest use the zero default.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
569 
570 static bool
571 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
572 {
573 	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
574 
575 	return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
576 }
577 
/* Program one direction's per-{port, TC} quotas from the table @cms.
 *
 * Table min_buff values are in bytes and converted to cells here.
 * max_buff is taken verbatim for dynamic pools (alpha index), converted
 * to cells for static pools, and MLXSW_SP_SB_INFI requests an infinite
 * maximum. Ingress PG 8 is skipped; entries whose pool direction does
 * not match @dir are a table bug and are skipped with a WARN.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
618 
619 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
620 {
621 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
622 	int err;
623 
624 	err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
625 				     mlxsw_sp_port->local_port,
626 				     MLXSW_REG_SBXX_DIR_INGRESS,
627 				     mlxsw_sp->sb_vals->cms_ingress,
628 				     mlxsw_sp->sb_vals->cms_ingress_count);
629 	if (err)
630 		return err;
631 	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
632 				      mlxsw_sp_port->local_port,
633 				      MLXSW_REG_SBXX_DIR_EGRESS,
634 				      mlxsw_sp->sb_vals->cms_egress,
635 				      mlxsw_sp->sb_vals->cms_egress_count);
636 }
637 
638 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
639 {
640 	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
641 				      mlxsw_sp->sb_vals->cms_cpu,
642 				      mlxsw_sp->sb_vals->cms_cpu_count);
643 }
644 
/* Initializer for a per-{port, pool} quota entry. min_buff is in bytes;
 * max_buff is in bytes for static pools or a dynamic alpha otherwise
 * (see mlxsw_sp_port_sb_pms_init()).
 */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Spectrum-1 per-{port, pool} quotas, indexed by flat pool index. */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
};
664 
/* Spectrum-2 per-{port, pool} quotas, indexed by flat pool index. */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(10000, 90000),
};
678 
679 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
680 {
681 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
682 	int i;
683 	int err;
684 
685 	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
686 		const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
687 		u32 max_buff;
688 		u32 min_buff;
689 
690 		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
691 		max_buff = pm->max_buff;
692 		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
693 			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
694 		err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
695 					   i, min_buff, max_buff);
696 		if (err)
697 			return err;
698 	}
699 	return 0;
700 }
701 
/* Initializer for a per-priority multicast quota entry (SBMM). */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Multicast quotas, one per switch priority; shared by both ASIC
 * generations. All draw from egress pool index 4 with a dynamic
 * threshold (see the comment in mlxsw_sp_sb_mms_init()).
 */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
};
726 
/* Program per-priority multicast quotas via SBMM from the sb_vals
 * table; min_buff is converted from bytes to cells, max_buff is used
 * verbatim (dynamic threshold, see comment below).
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
752 
753 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
754 				u16 *p_ingress_len, u16 *p_egress_len)
755 {
756 	int i;
757 
758 	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
759 		if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
760 		    MLXSW_REG_SBXX_DIR_EGRESS)
761 			goto out;
762 	WARN(1, "No egress pools\n");
763 
764 out:
765 	*p_ingress_len = i;
766 	*p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
767 }
768 
/* Spectrum-1 shared buffer value tables, referenced through
 * mlxsw_sp->sb_vals by the init code above.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
783 
/* Spectrum-2 shared buffer value tables; the mms and CPU CM tables are
 * shared with Spectrum-1.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
798 
/* Driver-level shared buffer init: read device constants, allocate the
 * driver state, program pools, CPU port quotas and multicast quotas,
 * then register the shared buffer with devlink. Unwinds allocations on
 * failure; register writes need no unwinding.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count;
	u16 eg_pool_count;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MAX_BUFFER_SIZE);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
861 
862 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
863 {
864 	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
865 	mlxsw_sp_sb_ports_fini(mlxsw_sp);
866 	kfree(mlxsw_sp->sb);
867 }
868 
/* Per-port shared buffer init: headroom, then per-TC quotas, then
 * per-pool quotas.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
883 
884 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
885 			 unsigned int sb_index, u16 pool_index,
886 			 struct devlink_sb_pool_info *pool_info)
887 {
888 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
889 	enum mlxsw_reg_sbxx_dir dir;
890 	struct mlxsw_sp_sb_pr *pr;
891 
892 	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
893 	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
894 	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
895 	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
896 	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
897 	pool_info->cell_size = mlxsw_sp->sb->cell_size;
898 	return 0;
899 }
900 
901 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
902 			 unsigned int sb_index, u16 pool_index, u32 size,
903 			 enum devlink_sb_threshold_type threshold_type)
904 {
905 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
906 	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
907 	enum mlxsw_reg_sbpr_mode mode;
908 
909 	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
910 		return -EINVAL;
911 
912 	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
913 	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
914 				    pool_size, false);
915 }
916 
/* Offset between devlink threshold values and the hardware dynamic
 * "alpha" index for dynamic pools.
 */
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

/* Translate a cached max_buff to the devlink-facing threshold: dynamic
 * pools map the alpha index back (subtracting the negative offset adds
 * 2); static pools convert cells to bytes.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
928 
/* Translate a devlink threshold to a hardware max_buff: dynamic pools
 * map it to an alpha index, range-checked against the valid alpha span;
 * static pools convert bytes to cells. Returns 0 or -EINVAL.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}
947 
948 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
949 			      unsigned int sb_index, u16 pool_index,
950 			      u32 *p_threshold)
951 {
952 	struct mlxsw_sp_port *mlxsw_sp_port =
953 			mlxsw_core_port_driver_priv(mlxsw_core_port);
954 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
955 	u8 local_port = mlxsw_sp_port->local_port;
956 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
957 						       pool_index);
958 
959 	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
960 						 pm->max_buff);
961 	return 0;
962 }
963 
964 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
965 			      unsigned int sb_index, u16 pool_index,
966 			      u32 threshold)
967 {
968 	struct mlxsw_sp_port *mlxsw_sp_port =
969 			mlxsw_core_port_driver_priv(mlxsw_core_port);
970 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
971 	u8 local_port = mlxsw_sp_port->local_port;
972 	u32 max_buff;
973 	int err;
974 
975 	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
976 				       threshold, &max_buff);
977 	if (err)
978 		return err;
979 
980 	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
981 				    0, max_buff);
982 }
983 
/* devlink .sb_tc_pool_bind_get: report the pool a port/TC binding
 * points at and its threshold. The devlink pool type enum is cast
 * directly to the register direction enum, matching the cast in the
 * set path below.
 */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}
1003 
1004 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
1005 				 unsigned int sb_index, u16 tc_index,
1006 				 enum devlink_sb_pool_type pool_type,
1007 				 u16 pool_index, u32 threshold)
1008 {
1009 	struct mlxsw_sp_port *mlxsw_sp_port =
1010 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1011 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1012 	u8 local_port = mlxsw_sp_port->local_port;
1013 	u8 pg_buff = tc_index;
1014 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1015 	u32 max_buff;
1016 	int err;
1017 
1018 	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir)
1019 		return -EINVAL;
1020 
1021 	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1022 				       threshold, &max_buff);
1023 	if (err)
1024 		return err;
1025 
1026 	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
1027 				    0, max_buff, false, pool_index);
1028 }
1029 
/* Maximum number of ports that fit in one SBSR query batch: each masked
 * port contributes one record per ingress PG plus one per egress tclass,
 * and a single SBSR response holds at most MLXSW_REG_SBSR_REC_MAX_COUNT
 * records.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
1033 
/* Per-batch context handed from the SBSR query issuer to the completion
 * callback. It is memcpy'd into the unsigned long cb_priv cookie, so it
 * must stay small enough to fit in one.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;	/* number of ports masked in this batch */
	u8 local_port_1;	/* first local port of this batch */
};
1038 
/* Completion callback for a batched SBSR occupancy query. Unpacks the
 * returned records into the cached per-port/per-TC occupancy values
 * (cm->occ). Records are consumed in the order the code packed the
 * request: all ingress PG records for the batch's ports first, then all
 * egress tclass records — rec_index runs continuously across both loops,
 * so the two loops must walk the exact same set of ports.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* Recover the batch description packed into the cookie. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	/* Pass 1: ingress records for each port in the batch. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		/* Stop after the number of ports the issuer masked. */
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	/* Pass 2: egress records for the same ports, same order. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1082 
/* devlink op: take an occupancy snapshot of the whole shared buffer.
 * Ports are queried in batches of at most MASKED_COUNT_MAX (SBSR record
 * limit); each batch issues one SBSR transaction whose completion
 * callback (mlxsw_sp_sb_sr_occ_query_cb) fills the cached cm->occ
 * values, plus one SBPM query per pool per port for pm->occ. All
 * transactions are collected on bulk_list and waited for together at
 * the end, even on error, so none are leaked.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	/* Start the next batch at the port after the last one handled. */
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	/* Request records for every ingress PG and egress tclass. */
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		/* Per-port pool occupancy is fetched via separate SBPM
		 * queries queued on the same bulk list.
		 */
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* SBSR response can hold only so many records; flush the
		 * batch once it is full.
		 */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	/* Pack the batch description into the callback cookie. */
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Wait for all queued transactions; preserve the first error. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1145 
/* devlink op: clear the maximum ("watermark") occupancy values in the
 * device. Uses the same batched SBSR scheme as mlxsw_sp_sb_occ_snapshot,
 * but packs SBSR with clr=true and needs no completion callback since no
 * data is read back; SBPM watermarks are cleared per pool per port.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	/* Start the next batch at the port after the last one handled. */
	local_port++;
	masked_count = 0;
	/* clr=true: the device resets the max occupancy on read. */
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		/* Clear the per-port pool watermarks as well. */
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* Flush the batch once the SBSR record limit is reached. */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	/* No callback: the response payload is not needed for a clear. */
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Wait for all queued transactions; preserve the first error. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1200 
1201 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1202 				  unsigned int sb_index, u16 pool_index,
1203 				  u32 *p_cur, u32 *p_max)
1204 {
1205 	struct mlxsw_sp_port *mlxsw_sp_port =
1206 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1207 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1208 	u8 local_port = mlxsw_sp_port->local_port;
1209 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1210 						       pool_index);
1211 
1212 	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1213 	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1214 	return 0;
1215 }
1216 
1217 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1218 				     unsigned int sb_index, u16 tc_index,
1219 				     enum devlink_sb_pool_type pool_type,
1220 				     u32 *p_cur, u32 *p_max)
1221 {
1222 	struct mlxsw_sp_port *mlxsw_sp_port =
1223 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1224 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1225 	u8 local_port = mlxsw_sp_port->local_port;
1226 	u8 pg_buff = tc_index;
1227 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1228 	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1229 						       pg_buff, dir);
1230 
1231 	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1232 	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1233 	return 0;
1234 }
1235