1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Microchip Sparx5 Switch driver 3 * 4 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. 5 */ 6 7 #include <net/pkt_cls.h> 8 9 #include "sparx5_main.h" 10 #include "sparx5_qos.h" 11 12 /* Max rates for leak groups */ 13 static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = { 14 1048568, /* 1.049 Gbps */ 15 2621420, /* 2.621 Gbps */ 16 10485680, /* 10.486 Gbps */ 17 26214200 /* 26.214 Gbps */ 18 }; 19 20 static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT]; 21 22 static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group) 23 { 24 u32 value; 25 26 value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group)); 27 return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value); 28 } 29 30 static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group, 31 u32 leak_time) 32 { 33 spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5, 34 HSCH_HSCH_TIMER_CFG(layer, group)); 35 } 36 37 static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group) 38 { 39 u32 value; 40 41 value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group)); 42 return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value); 43 } 44 45 static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group, 46 u32 idx) 47 48 { 49 u32 value; 50 51 value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx)); 52 return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value); 53 } 54 55 static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group) 56 { 57 u32 itr, next; 58 59 itr = sparx5_lg_get_first(sparx5, layer, group); 60 61 for (;;) { 62 next = sparx5_lg_get_next(sparx5, layer, group, itr); 63 if (itr == next) 64 return itr; 65 66 itr = next; 67 } 68 } 69 70 static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group, 71 u32 idx) 72 { 73 return idx == sparx5_lg_get_next(sparx5, layer, group, idx); 74 } 75 76 static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group, 77 u32 idx) 78 { 79 return idx == sparx5_lg_get_first(sparx5, layer, group); 80 } 81 82 static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group) 83 { 84 return sparx5_lg_get_leak_time(sparx5, layer, group) == 0; 85 } 86 87 static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group) 88 { 89 if (sparx5_lg_is_empty(sparx5, layer, group)) 90 return false; 91 92 return sparx5_lg_get_first(sparx5, layer, group) == 93 sparx5_lg_get_last(sparx5, layer, group); 94 } 95 96 static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group, 97 u32 leak_time) 98 { 99 sparx5_lg_set_leak_time(sparx5, layer, group, leak_time); 100 } 101 102 static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group) 103 { 104 sparx5_lg_set_leak_time(sparx5, layer, group, 0); 105 } 106 107 static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer, 108 u32 idx, u32 *group) 109 { 110 u32 itr, next; 111 int i; 112 113 for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) { 114 if (sparx5_lg_is_empty(sparx5, layer, i)) 115 continue; 116 117 itr = sparx5_lg_get_first(sparx5, layer, i); 118 119 for (;;) { 120 next = sparx5_lg_get_next(sparx5, layer, i, itr); 121 122 if (itr == idx) { 123 *group = i; 124 return 0; /* Found it */ 125 } 126 if (itr == next) 127 break; /* Was not found */ 128 129 itr = next; 130 } 131 } 132 133 return -1; 134 } 135 136 static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group) 137 { 138 struct sparx5_layer *l = &layers[layer]; 139 struct sparx5_lg *lg; 140 u32 i; 141 142 for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) { 143 lg = &l->leak_groups[i]; 144 if (rate <= lg->max_rate) { 145 *group = i; 146 return 0; 147 } 148 } 149 150 return -1; 151 } 152 153 static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group, 154 u32 idx, u32 *prev, u32 *next, u32 *first) 155 { 156 u32 itr; 157 158 *first = sparx5_lg_get_first(sparx5, layer, group); 159 *prev = *first; 160 *next = *first; 161 itr = *first; 162 163 for (;;) { 164 *next = sparx5_lg_get_next(sparx5, layer, group, itr); 165 166 if (itr == idx) 167 return 0; /* Found it */ 168 169 if (itr == *next) 170 return -1; /* Was not found */ 171 172 *prev = itr; 173 itr = *next; 174 } 175 176 return -1; 177 } 178 179 static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group, 180 u32 se_first, u32 idx, u32 idx_next, bool empty) 181 { 182 u32 leak_time = layers[layer].leak_groups[group].leak_time; 183 184 /* Stop leaking */ 185 sparx5_lg_disable(sparx5, layer, group); 186 187 if (empty) 188 return 0; 189 190 /* Select layer */ 191 spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer), 192 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG); 193 194 /* Link elements */ 195 spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5, 196 HSCH_SE_CONNECT(idx)); 197 198 /* Set the first element. */ 199 spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first), 200 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5, 201 HSCH_HSCH_LEAK_CFG(layer, group)); 202 203 /* Start leaking */ 204 sparx5_lg_enable(sparx5, layer, group, leak_time); 205 206 return 0; 207 } 208 209 static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx) 210 { 211 u32 first, next, prev; 212 bool empty = false; 213 214 /* idx *must* be present in the leak group */ 215 WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next, 216 &first) < 0); 217 218 if (sparx5_lg_is_singular(sparx5, layer, group)) { 219 empty = true; 220 } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) { 221 /* idx is removed, prev is now last */ 222 idx = prev; 223 next = prev; 224 } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) { 225 /* idx is removed and points to itself, first is next */ 226 first = next; 227 next = idx; 228 } else { 229 /* Next is not touched */ 230 idx = prev; 231 } 232 233 return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next, 234 empty); 235 } 236 237 static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group, 238 u32 idx) 239 { 240 u32 first, next, old_group; 241 242 pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group, 243 idx); 244 245 /* Is this SE already shaping ? */ 246 if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) { 247 if (old_group != new_group) { 248 /* Delete from old group */ 249 sparx5_lg_del(sparx5, layer, old_group, idx); 250 } else { 251 /* Nothing to do here */ 252 return 0; 253 } 254 } 255 256 /* We always add to head of the list */ 257 first = idx; 258 259 if (sparx5_lg_is_empty(sparx5, layer, new_group)) 260 next = idx; 261 else 262 next = sparx5_lg_get_first(sparx5, layer, new_group); 263 264 return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next, 265 false); 266 } 267 268 static int sparx5_shaper_conf_set(struct sparx5_port *port, 269 const struct sparx5_shaper *sh, u32 layer, 270 u32 idx, u32 group) 271 { 272 int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32); 273 struct sparx5 *sparx5 = port->sparx5; 274 275 if (!sh->rate && !sh->burst) 276 sparx5_lg_action = &sparx5_lg_del; 277 else 278 sparx5_lg_action = &sparx5_lg_add; 279 280 /* Select layer */ 281 spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer), 282 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG); 283 284 /* Set frame mode */ 285 spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE, 286 sparx5, HSCH_SE_CFG(idx)); 287 288 /* Set committed rate and burst */ 289 spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) | 290 HSCH_CIR_CFG_CIR_BURST_SET(sh->burst), 291 sparx5, HSCH_CIR_CFG(idx)); 292 293 /* This has to be done after the shaper configuration has been set */ 294 sparx5_lg_action(sparx5, layer, group, idx); 295 296 return 0; 297 } 298 299 static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight) 300 { 301 return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) - 302 1; 303 } 304 305 static int sparx5_dwrr_conf_set(struct sparx5_port *port, 306 struct sparx5_dwrr *dwrr) 307 { 308 int i; 309 310 spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) | 311 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno), 312 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX, 313 port->sparx5, HSCH_HSCH_CFG_CFG); 314 315 /* Number of *lower* indexes that are arbitrated dwrr */ 316 spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count), 317 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5, 318 HSCH_SE_CFG(port->portno)); 319 320 for (i = 0; i < dwrr->count; i++) { 321 spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]), 322 HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5, 323 HSCH_DWRR_ENTRY(i)); 324 } 325 326 return 0; 327 } 328 329 static int sparx5_leak_groups_init(struct sparx5 *sparx5) 330 { 331 struct sparx5_layer *layer; 332 u32 sys_clk_per_100ps; 333 struct sparx5_lg *lg; 334 u32 leak_time_us; 335 int i, ii; 336 337 sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER); 338 339 for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) { 340 layer = &layers[i]; 341 for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) { 342 lg = &layer->leak_groups[ii]; 343 lg->max_rate = spx5_hsch_max_group_rate[ii]; 344 345 /* Calculate the leak time in us, to serve a maximum 346 * rate of 'max_rate' for this group 347 */ 348 leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate; 349 350 /* Hardware wants leak time in ns */ 351 lg->leak_time = 1000 * leak_time_us; 352 353 /* Calculate resolution */ 354 lg->resolution = 1000 / leak_time_us; 355 356 /* Maximum number of shapers that can be served by 357 * this leak group 358 */ 359 lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps; 360 361 /* Example: 362 * Wanted bandwidth is 100Mbit: 363 * 364 * 100 mbps can be served by leak group zero. 365 * 366 * leak_time is 125000 ns. 367 * resolution is: 8 368 * 369 * cir = 100000 / 8 = 12500 370 * leaks_pr_sec = 125000 / 10^9 = 8000 371 * bw = 12500 * 8000 = 10^8 (100 Mbit) 372 */ 373 374 /* Disable by default - this also indicates an empty 375 * leak group 376 */ 377 sparx5_lg_disable(sparx5, i, ii); 378 } 379 } 380 381 return 0; 382 } 383 384 int sparx5_qos_init(struct sparx5 *sparx5) 385 { 386 int ret; 387 388 ret = sparx5_leak_groups_init(sparx5); 389 if (ret < 0) 390 return ret; 391 392 ret = sparx5_dcb_init(sparx5); 393 if (ret < 0) 394 return ret; 395 396 return 0; 397 } 398 399 int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc) 400 { 401 int i; 402 403 if (num_tc != SPX5_PRIOS) { 404 netdev_err(ndev, "Only %d traffic classes supported\n", 405 SPX5_PRIOS); 406 return -EINVAL; 407 } 408 409 netdev_set_num_tc(ndev, num_tc); 410 411 for (i = 0; i < num_tc; i++) 412 netdev_set_tc_queue(ndev, i, 1, i); 413 414 netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n", 415 ndev->num_tc, ndev->real_num_tx_queues); 416 417 return 0; 418 } 419 420 int sparx5_tc_mqprio_del(struct net_device *ndev) 421 { 422 netdev_reset_tc(ndev); 423 424 netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n", 425 ndev->num_tc, ndev->real_num_tx_queues); 426 427 return 0; 428 } 429 430 int sparx5_tc_tbf_add(struct sparx5_port *port, 431 struct tc_tbf_qopt_offload_replace_params *params, 432 u32 layer, u32 idx) 433 { 434 struct sparx5_shaper sh = { 435 .mode = SPX5_SE_MODE_DATARATE, 436 .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8, 437 .burst = params->max_size, 438 }; 439 struct sparx5_lg *lg; 440 u32 group; 441 442 /* Find suitable group for this se */ 443 if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) { 444 pr_debug("Could not find leak group for se with rate: %d", 445 sh.rate); 446 return -EINVAL; 447 } 448 449 lg = &layers[layer].leak_groups[group]; 450 451 pr_debug("Found matching group (speed: %d)\n", lg->max_rate); 452 453 if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN) 454 return -EINVAL; 455 456 /* Calculate committed rate and burst */ 457 sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution); 458 sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT); 459 460 if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX) 461 return -EINVAL; 462 463 return sparx5_shaper_conf_set(port, &sh, layer, idx, group); 464 } 465 466 int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx) 467 { 468 struct sparx5_shaper sh = {0}; 469 u32 group; 470 471 sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group); 472 473 return sparx5_shaper_conf_set(port, &sh, layer, idx, group); 474 } 475 476 int sparx5_tc_ets_add(struct sparx5_port *port, 477 struct tc_ets_qopt_offload_replace_params *params) 478 { 479 struct sparx5_dwrr dwrr = {0}; 480 /* Minimum weight for each iteration */ 481 unsigned int w_min = 100; 482 int i; 483 484 /* Find minimum weight for all dwrr bands */ 485 for (i = 0; i < SPX5_PRIOS; i++) { 486 if (params->quanta[i] == 0) 487 continue; 488 w_min = min(w_min, params->weights[i]); 489 } 490 491 for (i = 0; i < SPX5_PRIOS; i++) { 492 /* Strict band; skip */ 493 if (params->quanta[i] == 0) 494 continue; 495 496 dwrr.count++; 497 498 /* On the sparx5, bands with higher indexes are preferred and 499 * arbitrated strict. Strict bands are put in the lower indexes, 500 * by tc, so we reverse the bands here. 501 * 502 * Also convert the weight to something the hardware 503 * understands. 504 */ 505 dwrr.cost[SPX5_PRIOS - i - 1] = 506 sparx5_weight_to_hw_cost(w_min, params->weights[i]); 507 } 508 509 return sparx5_dwrr_conf_set(port, &dwrr); 510 } 511 512 int sparx5_tc_ets_del(struct sparx5_port *port) 513 { 514 struct sparx5_dwrr dwrr = {0}; 515 516 return sparx5_dwrr_conf_set(port, &dwrr); 517 } 518