1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 
10 #include "spectrum.h"
11 #include "core.h"
12 #include "port.h"
13 #include "reg.h"
14 
/* Software cache of a shared-buffer pool's configuration, mirroring the
 * last successful SBPR register write (see mlxsw_sp_sb_pr_write()).
 */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;	/* static or dynamic thresholding */
	u32 size;			/* pool size, in cells */
};
19 
/* Occupancy snapshot: current usage and maximum (watermark), in cells. */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
24 
/* Software cache of a per-{port, PG/TC} quota (SBCM register) plus its
 * last queried occupancy.
 */
struct mlxsw_sp_sb_cm {
	u32 min_buff;			/* guaranteed quota, in cells */
	u32 max_buff;			/* cells (static) or alpha (dynamic) */
	u16 pool_index;			/* index into sb_vals->pool_dess */
	struct mlxsw_cp_sb_occ occ;
};
31 
/* Sentinel size meaning "infinite": the entity is given the whole shared
 * buffer. Translated to an infi_size/infi_max register flag on write.
 */
#define MLXSW_SP_SB_INFI -1U

/* Software cache of a per-{port, pool} quota (SBPM register) plus its
 * last queried occupancy.
 */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
39 
/* Per-priority multicast quota description (SBMM register). */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;			/* dynamic threshold (alpha), not cells */
	u16 pool_index;			/* index into sb_vals->pool_dess */
};
45 
/* Descriptor identifying a hardware pool: direction plus pool number. */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};
50 
/* Order ingress pools before egress pools. The index into this array is
 * the pool_index used throughout this file and exposed via devlink.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},	/* multicast pool */
};
63 
/* Spectrum-2 pool descriptors; same ingress-before-egress ordering. */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
};
74 
/* Number of priority groups / traffic classes tracked per direction. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared-buffer state: quota caches for each ingress PG and
 * egress TC, plus a dynamically sized per-pool quota array.
 */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;	/* one entry per pool */
};
83 
/* Top-level shared-buffer state, allocated in mlxsw_sp_buffers_init(). */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;	/* one entry per pool */
	struct mlxsw_sp_sb_port *ports;	/* one entry per local port */
	u32 cell_size;			/* bytes per buffer cell */
	u32 max_headroom_cells;		/* per-port headroom limit */
	u64 sb_size;			/* total shared buffer, in bytes */
};
91 
/* Per-ASIC (Spectrum-1 vs. Spectrum-2) set of shared-buffer defaults;
 * instantiated below as mlxsw_sp1_sb_vals and mlxsw_sp2_sb_vals.
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
106 
107 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
108 {
109 	return mlxsw_sp->sb->cell_size * cells;
110 }
111 
112 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
113 {
114 	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
115 }
116 
117 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
118 {
119 	return mlxsw_sp->sb->max_headroom_cells;
120 }
121 
122 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
123 						 u16 pool_index)
124 {
125 	return &mlxsw_sp->sb->prs[pool_index];
126 }
127 
128 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
129 {
130 	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
131 		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
132 	else
133 		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
134 }
135 
136 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
137 						 u8 local_port, u8 pg_buff,
138 						 enum mlxsw_reg_sbxx_dir dir)
139 {
140 	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
141 
142 	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
143 	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
144 		return &sb_port->ing_cms[pg_buff];
145 	else
146 		return &sb_port->eg_cms[pg_buff];
147 }
148 
149 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
150 						 u8 local_port, u16 pool_index)
151 {
152 	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
153 }
154 
/* Program a pool via the SBPR register and, on success, update the
 * software cache. When @infi_size is set, @size is ignored by hardware
 * and the cache records the whole shared buffer instead.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* Cache only after the register write succeeded. */
	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
178 
/* Program a {port, PG/TC} quota via the SBCM register and, on success,
 * update the software cache. Dummy PGs (e.g. ingress PG 8) are written
 * to hardware but have no cache entry.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		/* Record the effective maximum when "infinite" was used. */
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
208 
/* Program a {port, pool} quota via the SBPM register and, on success,
 * update the software cache.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
229 
/* Queue a clear-on-read SBPM query onto @bulk_list; no completion
 * callback is needed since the result is discarded.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
242 
/* Completion callback for an SBPM occupancy query; @cb_priv carries the
 * cache entry to fill in (set up by mlxsw_sp_sb_pm_occ_query()).
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
251 
/* Queue an SBPM occupancy query onto @bulk_list; the callback stores the
 * result into this port/pool's cache entry.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
268 
/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8

/* Configure the port's headroom buffers via PBMC: buffer 0 is sized by
 * port width, buffer 9 holds two maximum-MTU frames, buffer 8 is left
 * untouched, and the remaining buffers are set to zero.
 */
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	const u32 pbs[] = {
		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
		[9] = 2 * MLXSW_PORT_MAX_MTU,
	};
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	/* Disable the port's contribution to the shared headroom pool. */
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
296 
297 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
298 {
299 	char pptb_pl[MLXSW_REG_PPTB_LEN];
300 	int i;
301 
302 	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
303 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
304 		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
305 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
306 			       pptb_pl);
307 }
308 
/* Initialize port headroom: program the buffers, then the priority map. */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err = mlxsw_sp_port_pb_init(mlxsw_sp_port);

	return err ? err : mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
318 
319 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
320 				 struct mlxsw_sp_sb_port *sb_port)
321 {
322 	struct mlxsw_sp_sb_pm *pms;
323 
324 	pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
325 		      GFP_KERNEL);
326 	if (!pms)
327 		return -ENOMEM;
328 	sb_port->pms = pms;
329 	return 0;
330 }
331 
/* Free the per-pool quota array allocated by mlxsw_sp_sb_port_init(). */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}
336 
/* Allocate the per-port array, the per-pool cache, and each port's pool
 * quota array. On failure, already-initialized ports are torn down in
 * reverse order and all allocations are released.
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	/* Unwind only the ports initialized before the failure. */
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
374 
375 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
376 {
377 	int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
378 	int i;
379 
380 	for (i = max_ports - 1; i >= 0; i--)
381 		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
382 	kfree(mlxsw_sp->sb->prs);
383 	kfree(mlxsw_sp->sb->ports);
384 }
385 
/* Initializer for a pool default (mode + size in bytes). */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

#define MLXSW_SP1_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP1_SB_PR_EGRESS_SIZE	13232000

/* Spectrum-1 pool defaults; ordered to match mlxsw_sp1_sb_pool_dess. */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	/* Multicast pool: the entire shared buffer. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};
412 
#define MLXSW_SP2_SB_PR_INGRESS_SIZE	40960000
#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP2_SB_PR_EGRESS_SIZE	40960000

/* Spectrum-2 pool defaults; ordered to match mlxsw_sp2_sb_pool_dess. */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
};
432 
433 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
434 				const struct mlxsw_sp_sb_pr *prs,
435 				size_t prs_len)
436 {
437 	int i;
438 	int err;
439 
440 	for (i = 0; i < prs_len; i++) {
441 		u32 size = prs[i].size;
442 		u32 size_cells;
443 
444 		if (size == MLXSW_SP_SB_INFI) {
445 			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
446 						   0, true);
447 		} else {
448 			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
449 			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
450 						   size_cells, false);
451 		}
452 		if (err)
453 			return err;
454 	}
455 	return 0;
456 }
457 
/* Initializer for a per-TC quota default. */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Spectrum-1 ingress defaults, indexed by PG; min_buff is in bytes,
 * max_buff is an alpha index for dynamic pools.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};
477 
/* Spectrum-2 ingress defaults, indexed by PG. */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(0, 7, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};
490 
/* Spectrum-1 egress defaults: unicast TCs 0-7, then multicast TCs 8-15
 * bound to the infinite multicast pool, then TC 16.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(1, 0xff, 4),
};
510 
/* Spectrum-2 egress defaults, indexed by TC. */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(0, 7, 4),
	MLXSW_SP_SB_CM(1, 0xff, 4),
};
530 
/* CPU port default: zero quota, bound to egress pool 4. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)

/* CPU port egress defaults; select TCs get a max-MTU minimum quota. */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
567 
568 static bool
569 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
570 {
571 	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
572 
573 	return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
574 }
575 
/* Program a port's per-TC quotas for one direction from the @cms table.
 * min_buff values in the table are in bytes; max_buff is in bytes only
 * for static pools (dynamic pools use an alpha index, MLXSW_SP_SB_INFI
 * maps to the register's infinite flag).
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* A table entry must reference a pool of its own direction. */
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			/* Only static pools interpret max_buff as a size. */
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
616 
617 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
618 {
619 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
620 	int err;
621 
622 	err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
623 				     mlxsw_sp_port->local_port,
624 				     MLXSW_REG_SBXX_DIR_INGRESS,
625 				     mlxsw_sp->sb_vals->cms_ingress,
626 				     mlxsw_sp->sb_vals->cms_ingress_count);
627 	if (err)
628 		return err;
629 	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
630 				      mlxsw_sp_port->local_port,
631 				      MLXSW_REG_SBXX_DIR_EGRESS,
632 				      mlxsw_sp->sb_vals->cms_egress,
633 				      mlxsw_sp->sb_vals->cms_egress_count);
634 }
635 
/* Program the CPU port's (local port 0) egress per-TC quotas. */
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp->sb_vals->cms_cpu,
				      mlxsw_sp->sb_vals->cms_cpu_count);
}
642 
/* Initializer for a per-{port, pool} quota default. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Spectrum-1 per-{port, pool} defaults; ordered like the pool
 * descriptors (ingress first). max_buff is an alpha index for dynamic
 * pools and bytes for static ones.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
};
662 
/* Spectrum-2 per-{port, pool} defaults. */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
};
675 
676 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
677 {
678 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
679 	int i;
680 	int err;
681 
682 	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
683 		const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
684 		u32 max_buff;
685 		u32 min_buff;
686 
687 		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
688 		max_buff = pm->max_buff;
689 		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
690 			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
691 		err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
692 					   i, min_buff, max_buff);
693 		if (err)
694 			return err;
695 	}
696 	return 0;
697 }
698 
/* Initializer for a per-priority multicast quota default. */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Multicast defaults, one entry per switch priority, all bound to
 * egress pool index 4 with dynamic max_buff (alpha) 6.
 */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
};
723 
/* Program the per-priority multicast quotas via the SBMM register. */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
749 
/* Count ingress and egress pools for devlink registration. Relies on
 * the descriptor array ordering all ingress pools before egress ones;
 * the first egress index is therefore the ingress count.
 */
static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
				u16 *p_ingress_len, u16 *p_egress_len)
{
	int i;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
		if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
		    MLXSW_REG_SBXX_DIR_EGRESS)
			goto out;
	WARN(1, "No egress pools\n");

out:
	*p_ingress_len = i;
	*p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
}
765 
/* Spectrum-1 shared-buffer defaults bundle. */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
780 
/* Spectrum-2 shared-buffer defaults bundle. */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
795 
/* Initialize shared-buffer management: read device limits, allocate the
 * software caches, program pools, CPU-port quotas and multicast quotas,
 * and register the shared buffer with devlink. Errors unwind via gotos
 * in reverse order of acquisition.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count;
	u16 eg_pool_count;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MAX_BUFFER_SIZE);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
858 
/* Tear down shared-buffer management in reverse order of init:
 * unregister from devlink first, then free the software state.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
865 
/* Initialize a port's buffers: headroom first, then per-TC quotas,
 * then per-pool quotas. Returns 0 or the first error.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;

	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
880 
881 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
882 			 unsigned int sb_index, u16 pool_index,
883 			 struct devlink_sb_pool_info *pool_info)
884 {
885 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
886 	enum mlxsw_reg_sbxx_dir dir;
887 	struct mlxsw_sp_sb_pr *pr;
888 
889 	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
890 	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
891 	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
892 	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
893 	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
894 	pool_info->cell_size = mlxsw_sp->sb->cell_size;
895 	return 0;
896 }
897 
898 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
899 			 unsigned int sb_index, u16 pool_index, u32 size,
900 			 enum devlink_sb_threshold_type threshold_type)
901 {
902 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
903 	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
904 	enum mlxsw_reg_sbpr_mode mode;
905 
906 	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
907 		return -EINVAL;
908 
909 	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
910 	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
911 				    pool_size, false);
912 }
913 
/* Offset between devlink's threshold scale and the register's alpha
 * index for dynamic pools.
 */
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

/* Convert a cached max_buff to the devlink representation: shift the
 * alpha index for dynamic pools, convert cells to bytes for static ones.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
925 
/* Convert a devlink threshold to a register max_buff: range-check and
 * shift to an alpha index for dynamic pools, convert bytes to cells for
 * static ones. Returns -EINVAL for an out-of-range dynamic threshold.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}
944 
945 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
946 			      unsigned int sb_index, u16 pool_index,
947 			      u32 *p_threshold)
948 {
949 	struct mlxsw_sp_port *mlxsw_sp_port =
950 			mlxsw_core_port_driver_priv(mlxsw_core_port);
951 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
952 	u8 local_port = mlxsw_sp_port->local_port;
953 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
954 						       pool_index);
955 
956 	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
957 						 pm->max_buff);
958 	return 0;
959 }
960 
/* devlink op: set a port's threshold on a pool; converts the devlink
 * threshold to a register value, then programs SBPM with min_buff 0.
 */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    0, max_buff);
}
980 
/* devlink op: report which pool a {port, TC} is bound to and its
 * threshold, from the cache.
 */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}
1000 
1001 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
1002 				 unsigned int sb_index, u16 tc_index,
1003 				 enum devlink_sb_pool_type pool_type,
1004 				 u16 pool_index, u32 threshold)
1005 {
1006 	struct mlxsw_sp_port *mlxsw_sp_port =
1007 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1008 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1009 	u8 local_port = mlxsw_sp_port->local_port;
1010 	u8 pg_buff = tc_index;
1011 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1012 	u32 max_buff;
1013 	int err;
1014 
1015 	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir)
1016 		return -EINVAL;
1017 
1018 	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1019 				       threshold, &max_buff);
1020 	if (err)
1021 		return err;
1022 
1023 	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
1024 				    0, max_buff, false, pool_index);
1025 }
1026 
/* Maximum number of ports whose occupancy can be queried in a single
 * SBSR transaction: each masked port contributes one record per ingress
 * PG and one per egress TC to the response.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Per-batch context handed to the SBSR completion callback.  It is
 * memcpy()ed into the unsigned long cb_priv of the transaction (see
 * mlxsw_sp_sb_occ_snapshot()), so it must stay that small.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;	/* number of ports covered by this batch */
	u8 local_port_1;	/* first local port of this batch */
};
1035 
/* Completion callback for the SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot().  Unpacks the returned occupancy records
 * into the cached per-{port, TC} cm->occ values.
 *
 * The record layout mirrors how the query was packed: first all ingress
 * PG records for every masked port of the batch, then all egress TC
 * records for the same ports, hence the two identical walks over the
 * port range starting at cb_ctx.local_port_1.  rec_index must advance
 * in exactly this order, so do not reorder the loops.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* cb_priv carries the batch context by value; see the memcpy in
	 * mlxsw_sp_sb_occ_snapshot().
	 */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		/* Stop after the ports that were actually masked in. */
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1079 
/* devlink op: take an occupancy snapshot of shared buffer @sb_index.
 *
 * Ports are processed in batches of up to MASKED_COUNT_MAX, since one
 * SBSR response holds only that many per-port TC records.  For each
 * batch a single SBSR query is issued with
 * mlxsw_sp_sb_sr_occ_query_cb() as completion callback to cache the
 * per-{port, TC} occupancy; the per-{port, pool} occupancy is queried
 * separately via SBPM.  All transactions are gathered on bulk_list and
 * waited for at the end, even on the error path, so no transaction is
 * left in flight when we return.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	/* Start the batch at the port after the last one handled. */
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}
	/* NOTE(review): if the remaining port range contains no active
	 * ports, we fall through here with masked_count == 0 and still
	 * issue an SBSR query with empty port masks — presumably harmless,
	 * but worth confirming against the SBSR spec.
	 */

do_query:
	/* Pass the batch bounds to the callback by value via cb_priv. */
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Always drain the bulk list; report the first error seen. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1142 
/* devlink op: clear the maximum-occupancy watermarks of shared buffer
 * @sb_index.
 *
 * Same batching scheme as mlxsw_sp_sb_occ_snapshot(), but the SBSR is
 * packed with clr=true and no completion callback is needed — the
 * returned records are discarded.  Per-{port, pool} watermarks are
 * cleared separately via SBPM.  All transactions are collected on
 * bulk_list and waited for at the end, even on error.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	/* Start the batch at the port after the last one handled. */
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Always drain the bulk list; report the first error seen. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1197 
1198 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1199 				  unsigned int sb_index, u16 pool_index,
1200 				  u32 *p_cur, u32 *p_max)
1201 {
1202 	struct mlxsw_sp_port *mlxsw_sp_port =
1203 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1204 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1205 	u8 local_port = mlxsw_sp_port->local_port;
1206 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1207 						       pool_index);
1208 
1209 	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1210 	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1211 	return 0;
1212 }
1213 
1214 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1215 				     unsigned int sb_index, u16 tc_index,
1216 				     enum devlink_sb_pool_type pool_type,
1217 				     u32 *p_cur, u32 *p_max)
1218 {
1219 	struct mlxsw_sp_port *mlxsw_sp_port =
1220 			mlxsw_core_port_driver_priv(mlxsw_core_port);
1221 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1222 	u8 local_port = mlxsw_sp_port->local_port;
1223 	u8 pg_buff = tc_index;
1224 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1225 	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1226 						       pg_buff, dir);
1227 
1228 	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1229 	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1230 	return 0;
1231 }
1232