/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

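/* The shared buffer configuration is shadowed in mlxsw_sp->sb: pool
 * entries (PRs), per-{port, PG/TC} entries (CMs) and per-{port, pool}
 * entries (PMs) are cached on write so that later devlink queries can
 * be answered without reading the hardware back.
 */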
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.prs[dir][pool];
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
}

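/* Each *_write() helper below programs a register and, on success,
 * mirrors the accepted values into the corresponding cache entry, so
 * the cache never holds values the hardware rejected.
 */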
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
				enum mlxsw_reg_sbxx_dir dir,
				enum mlxsw_reg_sbpr_mode mode, u32 size)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	pr->mode = mode;
	pr->size = size;
	return 0;
}

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff, u8 pool)
{
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
			    min_buff, max_buff, pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;
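	/* Only the first MLXSW_SP_SB_TC_COUNT PG/TC buffers have a
	 * driver-side shadow; higher pg_buff values are still written
	 * to the device but are not cached.
	 */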
	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
		struct mlxsw_sp_sb_cm *cm;

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool = pool;
	}
	return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pool, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}

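/* Occupancy reads go through the bulk transaction interface so that
 * many SBPM queries can be posted at once and completed together with
 * a single mlxsw_reg_trans_bulk_wait().
 */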
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}

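/* Default port headroom (PBMC) configuration: buffer 0 absorbs two
 * maximum size Ethernet frames, buffer 9 two frames of the maximum
 * supported MTU. Buffer 8 is intentionally skipped and the port
 * shared buffer is configured with zero size.
 */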
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
	[9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8

static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

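/* Default shared buffer pool sizes, in bytes: the ingress pool gets
 * 15MB less 2 x 20000 bytes per port, the egress pool 14MB less
 * 8 x 1500 bytes per port. Sizes are converted to cells when the
 * pools are programmed.
 */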
#define MLXSW_SP_SB_PR_INGRESS_SIZE				\
	(15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE				\
	(14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)

static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_pr *prs,
				  size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		const struct mlxsw_sp_sb_pr *pr;

		pr = &prs[i];
		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
					   pr->mode, pr->size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_prs_ingress,
				     MLXSW_SP_SB_PRS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_prs_egress,
				      MLXSW_SP_SB_PRS_EGRESS_LEN);
}

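/* Default per-{port, PG/TC} quotas: on ingress, PG 0 gets a 10KB
 * minimum from pool 0 and PG 9 a 20KB minimum from the management
 * pool (3); on egress, TCs 0-7 each get a 1500 byte minimum from
 * pool 0. For pools in dynamic mode, max_buff holds a dynamic
 * threshold (alpha) index rather than a cell count.
 */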
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(1, 0xff, 0),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_CMS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
					   cm->min_buff, cm->max_buff,
					   cm->pool);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_CMS_LEN);
}

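/* Default per-{port, pool} quotas: on ingress, ports may consume pools
 * 0 and 3 up to the maximum dynamic threshold; on egress, pool 0 is
 * capped at a lower dynamic threshold. The unused pools are clamped to
 * the minimum.
 */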
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)

static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				       enum mlxsw_reg_sbxx_dir dir,
				       const struct mlxsw_sp_sb_pm *pms,
				       size_t pms_len)
{
	int i;
	int err;

	for (i = 0; i < pms_len; i++) {
		const struct mlxsw_sp_sb_pm *pm;

		pm = &pms[i];
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
					   pm->min_buff, pm->max_buff);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					  mlxsw_sp_port->local_port,
					  MLXSW_REG_SBXX_DIR_INGRESS,
					  mlxsw_sp_sb_pms_ingress,
					  MLXSW_SP_SB_PMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					   mlxsw_sp_port->local_port,
					   MLXSW_REG_SBXX_DIR_EGRESS,
					   mlxsw_sp_sb_pms_egress,
					   MLXSW_SP_SB_PMS_EGRESS_LEN);
}

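/* Default multicast (SBMM) quotas: one entry per switch priority, each
 * with a 20KB minimum from pool 0.
 */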
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_mm *mc;

		mc = &mlxsw_sp_sb_mms[i];
		mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
				    mc->max_buff, mc->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}

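/* Total shared buffer size, as advertised to devlink. */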
#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		return err;
	return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				   MLXSW_SP_SB_SIZE,
				   MLXSW_SP_SB_POOL_COUNT,
				   MLXSW_SP_SB_POOL_COUNT,
				   MLXSW_SP_SB_TC_COUNT,
				   MLXSW_SP_SB_TC_COUNT);
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
}

int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}

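/* devlink uses a flat pool index space: indices 0..MLXSW_SP_SB_POOL_COUNT - 1
 * are the ingress pools and the following MLXSW_SP_SB_POOL_COUNT indices the
 * egress pools. The helpers below convert between the flat index and the
 * {pool, direction} pair used by the registers.
 */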
static u8 pool_get(u16 pool_index)
{
	return pool_index % MLXSW_SP_SB_POOL_COUNT;
}

static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
{
	u16 pool_index;

	pool_index = pool;
	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
		pool_index += MLXSW_SP_SB_POOL_COUNT;
	return pool_index;
}

static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
}

int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
	enum mlxsw_reg_sbpr_mode mode;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}

#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

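/* For pools in dynamic mode, devlink thresholds are alpha indices
 * shifted by the offset above; for static pools they are byte counts
 * converted to and from cells.
 */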
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return MLXSW_SP_CELLS_TO_BYTES(max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
				    u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
	}
	return 0;
}

int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
						 pm->max_buff);
	return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
				    0, max_buff);
}

int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
						 cm->max_buff);
	*p_pool_index = pool_index_get(cm->pool, dir);
	return 0;
}

int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	if (dir != dir_get(pool_index))
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
				    0, max_buff, pool);
}

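/* One SBSR dump carries MLXSW_SP_SB_TC_COUNT ingress and
 * MLXSW_SP_SB_TC_COUNT egress records per port, so at most
 * MASKED_COUNT_MAX ports fit into a single query and the occupancy
 * walks below proceed in batches of that size.
 */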
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))

struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}

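/* Query the current and maximum occupancy of all ports, batching SBSR
 * queries and per-pool SBPM queries on one bulk list; the batch
 * context is packed into the transaction's cb_priv for the callback
 * above.
 */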
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < MLXSW_PORT_MAX_PORTS)
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < MLXSW_PORT_MAX_PORTS)
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
	*p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
	return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
	*p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
	return 0;
}