// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

/* Cached configuration of one shared-buffer pool, mirroring what was last
 * written to the SBPR register: threshold mode and pool size (in cells).
 */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

/* Occupancy readout: current level and maximum watermark, in cells.
 * NOTE(review): "cp" in the tag looks like a typo for "sp" (every other
 * type here is mlxsw_sp_*); purely file-local, so harmless.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

/* Cached per-{port, PG/TC} quota, mirroring the SBCM register, plus the
 * most recent occupancy snapshot for that binding.
 */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
	struct mlxsw_cp_sb_occ occ;
};

/* Sentinel size meaning "infinite": the entity is programmed with the
 * register's infinite-size flag rather than an explicit value (see
 * mlxsw_sp_sb_pr_write() / mlxsw_sp_sb_cm_write() infi_* handling).
 */
#define MLXSW_SP_SB_INFI -1U

/* Cached per-{port, pool} quota, mirroring the SBPM register, plus the
 * most recent occupancy snapshot for that binding.
 */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

/* Maps a flat (devlink-visible) pool index to the hardware's
 * {direction, pool number} pair.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Order ingress pools before egress pools. */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};

#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess)

/* Number of ingress PGs resp. egress TCs tracked per port. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shadow state: ingress PG quotas, egress TC quotas and the
 * per-pool port quotas.
 */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN];
};

/* Device-wide shared-buffer state: pool configs, per-port state and the
 * cell size / total buffer size read from device resources.
 */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN];
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
	u64 sb_size;
};

/* Convert a cell count to bytes using the device's cell size. */
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	return mlxsw_sp->sb->cell_size * cells;
}

/* Convert bytes to cells, rounding up to whole cells. */
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}

static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u16 pool_index)
{
	return &mlxsw_sp->sb->prs[pool_index];
}

/* A {PG/TC, direction} binding exists only within the direction's TC
 * count: 8 PGs on ingress, 16 TCs on egress.
 */
static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
{
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
	else
		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return &sb_port->ing_cms[pg_buff];
	else
		return &sb_port->eg_cms[pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u16 pool_index)
{
	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}

/* Program a pool via SBPR and, on success, refresh the cached copy.
 * With infi_size, the cached size is the whole shared buffer (in cells).
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}

/* Program a {port, PG/TC} quota via SBCM and, on success, refresh the
 * cached copy. The cache is skipped for non-existing bindings (e.g.
 * ingress PG 8, which the tables still carry a dummy entry for).
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}

/* Program a {port, pool} quota via SBPM and, on success, refresh the
 * cached copy.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}

/* Queue an SBPM query with the clear flag set (third-to-last arg true),
 * resetting the max occupancy watermark. No completion callback needed.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

/* Completion callback for mlxsw_sp_sb_pm_occ_query(): cb_priv carries the
 * target cache entry, cast through unsigned long.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

/* Queue an SBPM occupancy query; the result lands in the cached pm entry
 * via mlxsw_sp_sb_pm_occ_query_cb() once the bulk list is waited on.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}

/* Port headroom buffer sizes in bytes, by buffer index; unlisted indices
 * are implicitly zero.
 */
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8

/* Configure the port's headroom buffers via PBMC. Buffer 8 is unused and
 * skipped; the port shared buffer is sized to zero. The 0xffff / 0xffff/2
 * pack arguments are register thresholds — see the PBMC definition in
 * reg.h for their exact meaning.
 */
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Map every switch priority of the port to buffer 0 via PPTB. */
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

/* Full headroom setup for a port: buffer sizes, then prio-to-buffer map. */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

/* Allocate the zero-initialized per-port shadow array, sized by the
 * device's maximum port count.
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;
	return 0;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->sb->ports);
}

/* Default pool sizes, in bytes. */
#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

/* Default pool configuration, indexed like mlxsw_sp_sb_pool_dess[]. */
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};

#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs)

/* Program all pools from a table. Sizes are in bytes and converted to
 * cells, except the MLXSW_SP_SB_INFI sentinel, which is programmed via
 * the infinite-size flag.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}
	return 0;
}

/* Table entry for a {PG/TC, pool} quota. min_buff is in bytes (converted
 * to cells at init time); max_buff is a dynamic threshold for dynamic
 * pools or bytes for static pools — see __mlxsw_sp_sb_cms_init().
 */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
	MLXSW_SP_SB_CM(1, 0xff, 4),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)

/* Egress quotas for the CPU port, one entry per TC. */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

static bool
mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}

/* Program all {PG/TC, pool} quotas of one port direction from a table.
 * min_buff is converted bytes->cells; max_buff is converted only for
 * static pools (dynamic pools take an "alpha" threshold as-is), and the
 * MLXSW_SP_SB_INFI sentinel is programmed via the infinite-max flag.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}

/* Apply the default ingress and egress quota tables to a front-panel
 * port.
 */
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

/* The CPU port (local port 0) gets only egress quotas. */
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}

/* Table entry for a {port, pool} quota; same unit conventions as
 * MLXSW_SP_SB_CM (see mlxsw_sp_port_sb_pms_init()).
 */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Default per-port pool quotas, indexed like mlxsw_sp_sb_pool_dess[]. */
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
};

#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)

/* Program the default per-pool quotas for one port. min_buff is
 * converted bytes->cells; max_buff only for static pools (dynamic pools
 * take the threshold value as-is).
 */
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
		const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i];
		u32 max_buff;
		u32 min_buff;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
					   i, min_buff, max_buff);
		if (err)
			return err;
	}
	return 0;
}

/* One SBMM (multicast) quota entry; written per switch priority in
 * mlxsw_sp_sb_mms_init().
 */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
	MLXSW_SP_SB_MM(0, 6, 4),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

/* Program one SBMM quota per table entry; the loop index doubles as the
 * switch priority passed to the register.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp_sb_mms[i];
		des = &mlxsw_sp_sb_pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}

/* Split the pool descriptor table into ingress/egress counts for devlink
 * registration. Relies on ingress descriptors preceding egress ones.
 */
static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len)
{
	int i;

	for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i)
		if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS)
			goto out;
	WARN(1, "No egress pools\n");

out:
	*p_ingress_len = i;
	*p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i;
}

/* Device init: read cell/buffer sizes from resources, allocate shadow
 * state, program pools, CPU-port quotas and multicast quotas, then
 * register the shared buffer with devlink. Unwinds on error.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u16 ing_pool_count;
	u16 eg_pool_count;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MAX_BUFFER_SIZE);

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs,
				   MLXSW_SP_SB_PRS_LEN);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}

/* Per-port init: headroom, then TC quotas, then pool quotas. */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}

/* devlink op: report a pool's type, size (bytes) and threshold type from
 * the cached config. The dir<->pool_type and mode<->threshold_type casts
 * rely on the enums being defined with matching values.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir;
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_pr *pr;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}

/* devlink op: resize/re-mode a pool. Size is given in bytes and bounded
 * by the device's maximum buffer size.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	enum mlxsw_reg_sbpr_mode mode;

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
				    pool_size, false);
}

#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

/* Translate a cached max_buff into the devlink-visible threshold:
 * dynamic pools shift by the alpha offset, static pools convert
 * cells->bytes.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

/* Inverse of mlxsw_sp_sb_threshold_out(); validates that a dynamic
 * threshold lands in the register's legal alpha range.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}

/* devlink op: report a {port, pool} threshold from the cached quota. */
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
						 pm->max_buff);
	return 0;
}

/* devlink op: set a {port, pool} threshold (min_buff forced to 0). */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    0, max_buff);
}

/* devlink op: report a {port, TC} binding's pool and threshold. */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}

/* devlink op: bind a {port, TC} to a pool with a threshold. The pool's
 * direction must match the requested binding direction.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir)
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, false, pool_index);
}

/* Max ports that fit in one SBSR response, given each port contributes
 * one record per ingress PG and per egress TC.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Per-batch context for the SBSR callback: how many ports were masked
 * into the query and the first port of the batch. Packed by value into
 * the unsigned long cb_priv (see memcpy in caller/callback).
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

/* SBSR completion callback: walk the same ports the query batch covered
 * (ingress records first, then egress, in record order) and unpack each
 * occupancy pair into the cached cm entries.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}

/* devlink op: snapshot occupancy of all ports. Ports are queried in
 * batches of at most MASKED_COUNT_MAX per SBSR query (plus one SBPM
 * query per {port, pool}); all transactions are bulk-waited at the end.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

/* devlink op: clear all max-occupancy watermarks. Same batching scheme
 * as mlxsw_sp_sb_occ_snapshot(), but SBSR/SBPM are issued with the clear
 * flag set and no unpack callback is needed.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

/* devlink op: report a {port, pool}'s cached occupancy, in bytes. */
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}

/* devlink op: report a {port, TC} binding's cached occupancy, in bytes. */
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}