1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
10 
11 #include "spectrum.h"
12 #include "core.h"
13 #include "port.h"
14 #include "reg.h"
15 
/* Cached state of one shared-buffer pool (SBPR register): threshold mode
 * and size. freeze_mode / freeze_size mark pools whose threshold type /
 * size must not be changed through devlink (enforced in
 * mlxsw_sp_sb_pool_set()).
 */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
	u8 freeze_mode:1,
	   freeze_size:1;
};
22 
/* Occupancy snapshot: current and maximum observed occupancy as unpacked
 * from the SBPM register (see mlxsw_reg_sbpm_unpack() users below).
 * NOTE(review): "cp" looks like a typo for "sp", but the name is used
 * throughout this file and renaming would be a wider change.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
27 
/* Cached state of one per-{port, PG/TC} quota (SBCM register): minimum and
 * maximum buffer values, the logical index of the backing pool and the last
 * occupancy sample. freeze_pool / freeze_thresh are set for the egress
 * multicast entries (see MLXSW_SP_SB_CM_EGR_MC) — presumably to forbid
 * devlink changes; enforcement is outside this chunk, verify at callers.
 */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
	struct mlxsw_cp_sb_occ occ;
	u8 freeze_pool:1,
	   freeze_thresh:1;
};
36 
/* Sentinel size meaning "infinite": translated to the infi_size / infi_max
 * register flags when written to the device.
 */
#define MLXSW_SP_SB_INFI -1U

/* Cached state of one per-{port, pool} quota (SBPM register). */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
44 
/* Default values for one per-switch-priority multicast quota (SBMM
 * register), written by mlxsw_sp_sb_mms_init().
 */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};
50 
/* Descriptor mapping a logical pool index to a {direction, HW pool number}
 * pair as used by the SBxx registers.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Well-known logical pool indices into the mlxsw_sp*_sb_pool_dess arrays
 * below: first ingress pool, first egress pool, egress multicast pool and
 * the dedicated CPU traffic pools.
 */
#define MLXSW_SP_SB_POOL_ING		0
#define MLXSW_SP_SB_POOL_EGR		4
#define MLXSW_SP_SB_POOL_EGR_MC		8
#define MLXSW_SP_SB_POOL_ING_CPU	9
#define MLXSW_SP_SB_POOL_EGR_CPU	10
61 
/* Spectrum-1 pool descriptors, indexed by the MLXSW_SP_SB_POOL_* logical
 * indices: four ingress pools, four egress pools, the egress multicast
 * pool (HW pool 15) and the two CPU traffic pools.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},	/* MLXSW_SP_SB_POOL_EGR_MC */
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},	/* MLXSW_SP_SB_POOL_ING_CPU */
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},		/* MLXSW_SP_SB_POOL_EGR_CPU */
};
75 
/* Spectrum-2 pool descriptors; same layout as the Spectrum-1 table. */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},	/* MLXSW_SP_SB_POOL_EGR_MC */
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},	/* MLXSW_SP_SB_POOL_ING_CPU */
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},		/* MLXSW_SP_SB_POOL_EGR_CPU */
};
89 
/* Number of priority groups / traffic classes tracked per direction. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared-buffer state: per-PG/TC quotas for both directions and a
 * dynamically allocated per-pool quota array (sized by pool_count, see
 * mlxsw_sp_sb_port_init()).
 */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;
};
98 
/* Top-level shared-buffer state: cached pool configuration, per-port
 * state, and device constants read from firmware resources (cell size,
 * total buffer size, headroom limit in cells).
 */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
	u32 max_headroom_cells;
	u64 sb_size;
};
106 
/* Per-ASIC default shared-buffer configuration: pool descriptors, pool
 * defaults (prs), per-{port, pool} quotas for front-panel (pms) and CPU
 * (pms_cpu) ports, per-{port, TC} quotas for ingress/egress/CPU, and
 * multicast defaults (mms), each with its element count.
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pm *pms_cpu;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
122 
123 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
124 {
125 	return mlxsw_sp->sb->cell_size * cells;
126 }
127 
128 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
129 {
130 	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
131 }
132 
133 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
134 {
135 	return mlxsw_sp->sb->max_headroom_cells;
136 }
137 
138 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
139 						 u16 pool_index)
140 {
141 	return &mlxsw_sp->sb->prs[pool_index];
142 }
143 
144 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
145 {
146 	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
147 		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
148 	else
149 		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
150 }
151 
152 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
153 						 u8 local_port, u8 pg_buff,
154 						 enum mlxsw_reg_sbxx_dir dir)
155 {
156 	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
157 
158 	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
159 	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
160 		return &sb_port->ing_cms[pg_buff];
161 	else
162 		return &sb_port->eg_cms[pg_buff];
163 }
164 
165 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
166 						 u8 local_port, u16 pool_index)
167 {
168 	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
169 }
170 
/* Program one pool via the SBPR register and, on success, update the
 * cached state. When infi_size is set, the device gets the "infinite"
 * flag and the cache records the whole shared buffer, in cells.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* Cache is only touched after the device accepted the write. */
	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
194 
/* Program one per-{port, PG/TC} quota via the SBCM register and mirror it
 * into the cache. PG/TC numbers the driver does not track (see
 * mlxsw_sp_sb_cm_exists()) are still written to the device but not cached.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		/* "Infinite" is cached as the whole shared buffer, in cells. */
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
224 
/* Program one per-{port, pool} quota via the SBPM register and, on
 * success, update the cached state.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
245 
/* Queue a clear-on-read SBPM query (clr=true) on the bulk list to reset
 * the occupancy watermark. The CPU port has no ingress quotas to clear,
 * so that combination is silently skipped.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
262 
263 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
264 					char *sbpm_pl, size_t sbpm_pl_len,
265 					unsigned long cb_priv)
266 {
267 	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
268 
269 	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
270 }
271 
/* Queue an SBPM occupancy query (clr=false) on the bulk list; the result
 * lands in the cached quota via mlxsw_sp_sb_pm_occ_query_cb(). As in the
 * clear path, the CPU port's non-existent ingress quotas are skipped.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
292 
/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8

/* Configure the port's priority buffers (PBMC register): buffer 0 gets a
 * width-scaled headroom, buffer 9 is sized for the maximum MTU, buffer 8
 * is skipped (unused), and the remaining buffers are set to zero (the pbs
 * array default). The port shared buffer is also zeroed.
 */
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	const u32 pbs[] = {
		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
		[9] = MLXSW_PORT_MAX_MTU,
	};
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
320 
321 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
322 {
323 	char pptb_pl[MLXSW_REG_PPTB_LEN];
324 	int i;
325 
326 	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
327 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
328 		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
329 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
330 			       pptb_pl);
331 }
332 
/* Initialize the port headroom: size the priority buffers, then map all
 * priorities to buffer 0.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
342 
343 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
344 				 struct mlxsw_sp_sb_port *sb_port)
345 {
346 	struct mlxsw_sp_sb_pm *pms;
347 
348 	pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
349 		      GFP_KERNEL);
350 	if (!pms)
351 		return -ENOMEM;
352 	sb_port->pms = pms;
353 	return 0;
354 }
355 
/* Free the per-{port, pool} quota array allocated by
 * mlxsw_sp_sb_port_init().
 */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}
360 
/* Allocate the per-port array, the cached pool states and each port's
 * per-pool quotas. On failure, already-initialized ports are torn down in
 * reverse order via the goto cleanup chain.
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	/* Unwind only the ports initialized so far. */
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
398 
/* Tear down everything allocated by mlxsw_sp_sb_ports_init(), releasing
 * ports in reverse order of initialization.
 */
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = max_ports - 1; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
	kfree(mlxsw_sp->sb->ports);
}
409 
/* Initializer for a default pool entry (mode and size only). */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

/* Extended initializer that additionally marks the mode and/or size as
 * frozen, i.e. not changeable through devlink.
 */
#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)	\
	{								\
		.mode = _mode,						\
		.size = _size,						\
		.freeze_mode = _freeze_mode,				\
		.freeze_size = _freeze_size,				\
	}
423 
/* Default pool sizes (bytes) for Spectrum-1. */
#define MLXSW_SP1_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP1_SB_PR_EGRESS_SIZE	13232000
#define MLXSW_SP1_SB_PR_CPU_SIZE	(256 * 1000)

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	/* Ingress pools 0-3; only pool 0 is sized. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	/* Egress pools 0-3; pool 0 sized, mode frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	/* Egress multicast pool: infinite, fully frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	/* CPU traffic pools (ingress, egress); mode frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
447 
/* Default pool sizes (bytes) for Spectrum-2. */
#define MLXSW_SP2_SB_PR_INGRESS_SIZE	35297568
#define MLXSW_SP2_SB_PR_EGRESS_SIZE	35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE	(256 * 1000)

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	/* Ingress pools 0-3; unused pools are static with size 0. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	/* Egress pools 0-3; pool 0 sized, mode frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	/* Egress multicast pool: infinite, fully frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	/* CPU traffic pools (ingress, egress); mode frozen. */
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
471 
472 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
473 				const struct mlxsw_sp_sb_pr *prs,
474 				size_t prs_len)
475 {
476 	int i;
477 	int err;
478 
479 	for (i = 0; i < prs_len; i++) {
480 		u32 size = prs[i].size;
481 		u32 size_cells;
482 
483 		if (size == MLXSW_SP_SB_INFI) {
484 			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
485 						   0, true);
486 		} else {
487 			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
488 			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
489 						   size_cells, false);
490 		}
491 		if (err)
492 			return err;
493 	}
494 	return 0;
495 }
496 
/* Initializer for a per-{port, TC} quota bound to an explicit pool. */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Shorthand: quota backed by the first ingress pool. */
#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_ING,	\
	}

/* Shorthand: quota backed by the first egress pool. */
#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* Shorthand: quota backed by the egress multicast pool, with pool binding
 * and threshold marked frozen.
 */
#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,	\
		.freeze_pool = true,			\
		.freeze_thresh = true,			\
	}
526 
/* Spectrum-1 ingress per-PG defaults, indexed by PG number. PG 8 does not
 * exist and is skipped by __mlxsw_sp_sb_cms_init(); the last entry (PG 9)
 * is for CPU-bound traffic and uses the ingress CPU pool.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
539 
/* Spectrum-2 ingress per-PG defaults; same layout as the Spectrum-1
 * table, with different minimum/maximum values for PG 0.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
552 
/* Spectrum-1 egress per-TC defaults, indexed by TC number: unicast TCs
 * 0-7 from the egress pool, multicast TCs 8-15 with an infinite quota
 * from the multicast pool (frozen), plus one trailing entry.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
572 
/* Spectrum-2 egress per-TC defaults; same layout as the Spectrum-1
 * table, with different unicast minimum/maximum values.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
592 
/* Zero quota from the egress CPU pool, used for most CPU-port TCs. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

/* CPU-port egress defaults, indexed by TC. A handful of TCs get a
 * non-zero quota (1000, 8); the rest are zeroed.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
629 
630 static bool
631 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
632 {
633 	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
634 
635 	return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
636 }
637 
/* Program a port's per-TC quotas for one direction from a defaults table.
 * min_buff is always converted from bytes to cells; max_buff is converted
 * only when the backing pool uses static thresholds (otherwise it is a
 * dynamic-alpha value and passed through). MLXSW_SP_SB_INFI maximums are
 * written with the infinite flag instead.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* Table entries must reference a pool of the same
		 * direction; a mismatch indicates a broken table.
		 */
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
678 
679 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
680 {
681 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
682 	int err;
683 
684 	err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
685 				     mlxsw_sp_port->local_port,
686 				     MLXSW_REG_SBXX_DIR_INGRESS,
687 				     mlxsw_sp->sb_vals->cms_ingress,
688 				     mlxsw_sp->sb_vals->cms_ingress_count);
689 	if (err)
690 		return err;
691 	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
692 				      mlxsw_sp_port->local_port,
693 				      MLXSW_REG_SBXX_DIR_EGRESS,
694 				      mlxsw_sp->sb_vals->cms_egress,
695 				      mlxsw_sp->sb_vals->cms_egress_count);
696 }
697 
698 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
699 {
700 	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
701 				      mlxsw_sp->sb_vals->cms_cpu,
702 				      mlxsw_sp->sb_vals->cms_cpu_count);
703 }
704 
/* Initializer for a per-{port, pool} quota (min/max only). */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	/* Ingress pools 0-3. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	/* Egress pools 0-3. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	/* Egress multicast pool (static thresholds, values in bytes). */
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8),	/* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
725 
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	/* Ingress pools 0-3. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	/* Egress pools 0-3. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	/* Egress multicast pool (static thresholds, values in bytes). */
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8),	/* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
740 
/* Order according to mlxsw_sp*_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
	/* Regular ingress/egress pools are unused by the CPU port. */
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	/* Egress multicast pool (static thresholds, values in bytes). */
	MLXSW_SP_SB_PM(0, 90000),
	MLXSW_SP_SB_PM(0, 0),
	/* Egress CPU pool. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
755 
/* Program a port's per-pool quotas from a defaults table. min_buff is
 * converted from bytes to cells; max_buff is converted only for pools
 * with static thresholds (dynamic values are alpha units). With
 * skip_ingress (the CPU port), ingress pools are left untouched.
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				const struct mlxsw_sp_sb_pm *pms,
				bool skip_ingress)
{
	int i, err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		const struct mlxsw_sp_sb_pool_des *des;
		u32 max_buff;
		u32 min_buff;

		des = &mlxsw_sp->sb_vals->pool_dess[i];
		if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
					   max_buff);
		if (err)
			return err;
	}
	return 0;
}
783 
784 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
785 {
786 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
787 
788 	return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
789 				    mlxsw_sp->sb_vals->pms, false);
790 }
791 
792 static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
793 {
794 	return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
795 				    true);
796 }
797 
/* Initializer for a per-switch-priority multicast quota, always backed by
 * the first egress pool.
 */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)		\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* Multicast defaults, indexed by switch priority; one identical entry per
 * priority.
 */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
};
822 
/* Program the per-switch-priority multicast quotas (SBMM register) from
 * the defaults table.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
848 
849 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
850 				u16 *p_ingress_len, u16 *p_egress_len)
851 {
852 	int i;
853 
854 	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
855 		if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
856 		    MLXSW_REG_SBXX_DIR_INGRESS)
857 			(*p_ingress_len)++;
858 		else
859 			(*p_egress_len)++;
860 	}
861 
862 	WARN(*p_egress_len == 0, "No egress pools\n");
863 }
864 
/* Aggregated Spectrum-1 shared-buffer defaults; the CPU-port tables are
 * shared between ASIC generations.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
880 
/* Aggregated Spectrum-2 shared-buffer defaults; the CPU-port tables are
 * shared between ASIC generations.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
896 
/* Driver-init entry point for the shared buffer: read device constants,
 * allocate state, program pools / CPU-port quotas / multicast quotas, and
 * register the shared buffer with devlink. Errors unwind via the goto
 * chain in reverse order.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count = 0;
	u16 eg_pool_count = 0;
	int err;

	/* All three firmware resources below are mandatory for sizing. */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MAX_BUFFER_SIZE);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_pms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
963 
/* Tear down everything set up by mlxsw_sp_buffers_init(), in reverse
 * order: devlink unregistration, per-port state, top-level state.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
970 
/* Per-port buffer initialization: headroom first, then the per-TC and
 * per-pool shared-buffer quotas.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;

	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;

	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
985 
986 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
987 			 unsigned int sb_index, u16 pool_index,
988 			 struct devlink_sb_pool_info *pool_info)
989 {
990 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
991 	enum mlxsw_reg_sbxx_dir dir;
992 	struct mlxsw_sp_sb_pr *pr;
993 
994 	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
995 	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
996 	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
997 	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
998 	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
999 	pool_info->cell_size = mlxsw_sp->sb->cell_size;
1000 	return 0;
1001 }
1002 
1003 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
1004 			 unsigned int sb_index, u16 pool_index, u32 size,
1005 			 enum devlink_sb_threshold_type threshold_type,
1006 			 struct netlink_ext_ack *extack)
1007 {
1008 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1009 	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
1010 	const struct mlxsw_sp_sb_pr *pr;
1011 	enum mlxsw_reg_sbpr_mode mode;
1012 
1013 	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
1014 	pr = &mlxsw_sp->sb_vals->prs[pool_index];
1015 
1016 	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
1017 		NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
1018 		return -EINVAL;
1019 	}
1020 
1021 	if (pr->freeze_mode && pr->mode != mode) {
1022 		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
1023 		return -EINVAL;
1024 	};
1025 
1026 	if (pr->freeze_size && pr->size != size) {
1027 		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
1028 		return -EINVAL;
1029 	};
1030 
1031 	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
1032 				    pool_size, false);
1033 }
1034 
1035 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
1036 
1037 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1038 				     u32 max_buff)
1039 {
1040 	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1041 
1042 	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
1043 		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1044 	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
1045 }
1046 
1047 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1048 				    u32 threshold, u32 *p_max_buff,
1049 				    struct netlink_ext_ack *extack)
1050 {
1051 	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1052 
1053 	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
1054 		int val;
1055 
1056 		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1057 		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
1058 		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
1059 			NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
1060 			return -EINVAL;
1061 		}
1062 		*p_max_buff = val;
1063 	} else {
1064 		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
1065 	}
1066 	return 0;
1067 }
1068 
1069 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1070 			      unsigned int sb_index, u16 pool_index,
1071 			      u32 *p_threshold)
1072 {
1073 	struct mlxsw_sp_port *mlxsw_sp_port =
1074 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1075 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1076 	u8 local_port = mlxsw_sp_port->local_port;
1077 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1078 						       pool_index);
1079 
1080 	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
1081 						 pm->max_buff);
1082 	return 0;
1083 }
1084 
1085 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
1086 			      unsigned int sb_index, u16 pool_index,
1087 			      u32 threshold, struct netlink_ext_ack *extack)
1088 {
1089 	struct mlxsw_sp_port *mlxsw_sp_port =
1090 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1091 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1092 	u8 local_port = mlxsw_sp_port->local_port;
1093 	u32 max_buff;
1094 	int err;
1095 
1096 	if (local_port == MLXSW_PORT_CPU_PORT) {
1097 		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
1098 		return -EINVAL;
1099 	}
1100 
1101 	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1102 				       threshold, &max_buff, extack);
1103 	if (err)
1104 		return err;
1105 
1106 	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
1107 				    0, max_buff);
1108 }
1109 
1110 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1111 				 unsigned int sb_index, u16 tc_index,
1112 				 enum devlink_sb_pool_type pool_type,
1113 				 u16 *p_pool_index, u32 *p_threshold)
1114 {
1115 	struct mlxsw_sp_port *mlxsw_sp_port =
1116 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1117 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1118 	u8 local_port = mlxsw_sp_port->local_port;
1119 	u8 pg_buff = tc_index;
1120 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1121 	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1122 						       pg_buff, dir);
1123 
1124 	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
1125 						 cm->max_buff);
1126 	*p_pool_index = cm->pool_index;
1127 	return 0;
1128 }
1129 
1130 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
1131 				 unsigned int sb_index, u16 tc_index,
1132 				 enum devlink_sb_pool_type pool_type,
1133 				 u16 pool_index, u32 threshold,
1134 				 struct netlink_ext_ack *extack)
1135 {
1136 	struct mlxsw_sp_port *mlxsw_sp_port =
1137 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1138 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1139 	u8 local_port = mlxsw_sp_port->local_port;
1140 	const struct mlxsw_sp_sb_cm *cm;
1141 	u8 pg_buff = tc_index;
1142 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1143 	u32 max_buff;
1144 	int err;
1145 
1146 	if (local_port == MLXSW_PORT_CPU_PORT) {
1147 		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
1148 		return -EINVAL;
1149 	}
1150 
1151 	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
1152 		NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
1153 		return -EINVAL;
1154 	}
1155 
1156 	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
1157 		cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
1158 	else
1159 		cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
1160 
1161 	if (cm->freeze_pool && cm->pool_index != pool_index) {
1162 		NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
1163 		return -EINVAL;
1164 	}
1165 
1166 	if (cm->freeze_thresh && cm->max_buff != threshold) {
1167 		NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
1168 		return -EINVAL;
1169 	}
1170 
1171 	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1172 				       threshold, &max_buff, extack);
1173 	if (err)
1174 		return err;
1175 
1176 	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
1177 				    0, max_buff, false, pool_index);
1178 }
1179 
/* Maximum number of ports that fit into one SBSR query batch: the response
 * carries at most MLXSW_REG_SBSR_REC_MAX_COUNT records and every queried
 * port contributes one record per ingress TC plus one per egress TC.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Per-batch context handed to the SBSR completion callback, packed into the
 * unsigned long cb_priv of mlxsw_reg_trans_query() via memcpy.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count; /* number of ports included in the batch */
	u8 local_port_1; /* first local port of the batch */
};
1188 
/* Completion callback for the SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot(). Unpacks the per-{port, TC} occupancy records
 * from the response payload into the driver's CM cache. The records are
 * consumed in the exact order the query requested them: first the ingress
 * records of every port in the batch, then the egress records, tracked by
 * the single running rec_index.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* Recover the batch context packed into cb_priv by the issuer. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	/* First pass: ingress records, walking the same ports (starting at
	 * local_port_1) and in the same order as the query was built.
	 */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port == MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			masked_count++;
			continue;
		}
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	/* Second pass: egress records follow the ingress records in the
	 * payload; the CPU port is included here (it was masked for egress).
	 */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1237 
/* devlink callback: snapshot shared-buffer occupancy for all ports.
 * Ports are queried in batches of at most MASKED_COUNT_MAX via the SBSR
 * register (non-clearing form); per-{port, pool} occupancy is additionally
 * queried via SBPM. All transactions are collected on bulk_list and waited
 * for at the end; the SBSR response is parsed by
 * mlxsw_sp_sb_sr_occ_query_cb() using the batch context packed into
 * cb_priv.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	/* Start the first batch at the CPU port; subsequent batches resume
	 * from where the previous one left off.
	 */
	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false); /* false: do not clear maxima */
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* Batch full - issue the query and continue from the next
		 * port. The loop also falls through to do_query when the
		 * port range is exhausted with a partially filled batch.
		 */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	/* NOTE(review): if a batch fills up exactly at the last existing
	 * port, this still starts one more (empty) batch query - appears
	 * harmless but worth confirming against the SBSR semantics.
	 */
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	/* Wait for all queued transactions even on error, so none are left
	 * in flight referencing sbsr_pl.
	 */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1306 
/* devlink callback: clear the recorded maximum occupancy watermarks for all
 * ports. Mirrors mlxsw_sp_sb_occ_snapshot()'s batching, but packs SBSR with
 * clr=true and needs no completion callback since no response data is
 * cached; per-{port, pool} maxima are cleared via SBPM.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	/* Batches start at the CPU port and resume from the previous stop. */
	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true); /* true: clear maxima on read */
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* Batch full - issue it; a partially filled final batch
		 * falls through to do_query when the loop ends.
		 */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	/* Always drain the bulk list before freeing the payload buffer. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1367 
1368 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1369 				  unsigned int sb_index, u16 pool_index,
1370 				  u32 *p_cur, u32 *p_max)
1371 {
1372 	struct mlxsw_sp_port *mlxsw_sp_port =
1373 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1374 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1375 	u8 local_port = mlxsw_sp_port->local_port;
1376 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1377 						       pool_index);
1378 
1379 	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1380 	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1381 	return 0;
1382 }
1383 
1384 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1385 				     unsigned int sb_index, u16 tc_index,
1386 				     enum devlink_sb_pool_type pool_type,
1387 				     u32 *p_cur, u32 *p_max)
1388 {
1389 	struct mlxsw_sp_port *mlxsw_sp_port =
1390 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1391 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1392 	u8 local_port = mlxsw_sp_port->local_port;
1393 	u8 pg_buff = tc_index;
1394 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1395 	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1396 						       pg_buff, dir);
1397 
1398 	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1399 	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1400 	return 0;
1401 }
1402