1 /* 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the names of the copyright holders nor the names of its 15 * contributors may be used to endorse or promote products derived from 16 * this software without specific prior written permission. 17 * 18 * Alternatively, this software may be distributed under the terms of the 19 * GNU General Public License ("GPL") version 2 as published by the Free 20 * Software Foundation. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include <linux/kernel.h> 36 #include <linux/errno.h> 37 #include <linux/netdevice.h> 38 #include <net/pkt_cls.h> 39 #include <net/red.h> 40 41 #include "spectrum.h" 42 #include "reg.h" 43 44 #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1) 45 46 enum mlxsw_sp_qdisc_type { 47 MLXSW_SP_QDISC_NO_QDISC, 48 MLXSW_SP_QDISC_RED, 49 MLXSW_SP_QDISC_PRIO, 50 }; 51 52 struct mlxsw_sp_qdisc_ops { 53 enum mlxsw_sp_qdisc_type type; 54 int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port, 55 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 56 void *params); 57 int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, 58 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); 59 int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port, 60 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc); 61 int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 62 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 63 struct tc_qopt_offload_stats *stats_ptr); 64 int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port, 65 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 66 void *xstats_ptr); 67 void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 68 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc); 69 /* unoffload - to be used for a qdisc that stops being offloaded without 70 * being destroyed. 
71 */ 72 void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port, 73 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); 74 }; 75 76 struct mlxsw_sp_qdisc { 77 u32 handle; 78 u8 tclass_num; 79 union { 80 struct red_stats red; 81 } xstats_base; 82 struct mlxsw_sp_qdisc_stats { 83 u64 tx_bytes; 84 u64 tx_packets; 85 u64 drops; 86 u64 overlimits; 87 u64 backlog; 88 } stats_base; 89 90 struct mlxsw_sp_qdisc_ops *ops; 91 }; 92 93 static bool 94 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle, 95 enum mlxsw_sp_qdisc_type type) 96 { 97 return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && 98 mlxsw_sp_qdisc->ops->type == type && 99 mlxsw_sp_qdisc->handle == handle; 100 } 101 102 static int 103 mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, 104 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) 105 { 106 int err = 0; 107 108 if (!mlxsw_sp_qdisc) 109 return 0; 110 111 if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy) 112 err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port, 113 mlxsw_sp_qdisc); 114 115 mlxsw_sp_qdisc->handle = TC_H_UNSPEC; 116 mlxsw_sp_qdisc->ops = NULL; 117 return err; 118 } 119 120 static int 121 mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, 122 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 123 struct mlxsw_sp_qdisc_ops *ops, void *params) 124 { 125 int err; 126 127 if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type) 128 /* In case this location contained a different qdisc of the 129 * same type we can override the old qdisc configuration. 130 * Otherwise, we need to remove the old qdisc before setting the 131 * new one. 
132 */ 133 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); 134 err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params); 135 if (err) 136 goto err_bad_param; 137 138 err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params); 139 if (err) 140 goto err_config; 141 142 if (mlxsw_sp_qdisc->handle != handle) { 143 mlxsw_sp_qdisc->ops = ops; 144 if (ops->clean_stats) 145 ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc); 146 } 147 148 mlxsw_sp_qdisc->handle = handle; 149 return 0; 150 151 err_bad_param: 152 err_config: 153 if (mlxsw_sp_qdisc->handle == handle && ops->unoffload) 154 ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params); 155 156 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); 157 return err; 158 } 159 160 static int 161 mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, 162 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 163 struct tc_qopt_offload_stats *stats_ptr) 164 { 165 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && 166 mlxsw_sp_qdisc->ops->get_stats) 167 return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port, 168 mlxsw_sp_qdisc, 169 stats_ptr); 170 171 return -EOPNOTSUPP; 172 } 173 174 static int 175 mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, 176 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 177 void *xstats_ptr) 178 { 179 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && 180 mlxsw_sp_qdisc->ops->get_xstats) 181 return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port, 182 mlxsw_sp_qdisc, 183 xstats_ptr); 184 185 return -EOPNOTSUPP; 186 } 187 188 static int 189 mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, 190 int tclass_num, u32 min, u32 max, 191 u32 probability, bool is_ecn) 192 { 193 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN]; 194 char cwtp_cmd[MLXSW_REG_CWTP_LEN]; 195 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 196 int err; 197 198 mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num); 199 mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE, 200 roundup(min, 
MLXSW_REG_CWTP_MIN_VALUE), 201 roundup(max, MLXSW_REG_CWTP_MIN_VALUE), 202 probability); 203 204 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd); 205 if (err) 206 return err; 207 208 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num, 209 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); 210 211 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd); 212 } 213 214 static int 215 mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port, 216 int tclass_num) 217 { 218 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 219 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN]; 220 221 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num, 222 MLXSW_REG_CWTPM_RESET_PROFILE, false, false); 223 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd); 224 } 225 226 static void 227 mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, 228 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) 229 { 230 u8 tclass_num = mlxsw_sp_qdisc->tclass_num; 231 struct mlxsw_sp_qdisc_stats *stats_base; 232 struct mlxsw_sp_port_xstats *xstats; 233 struct rtnl_link_stats64 *stats; 234 struct red_stats *red_base; 235 236 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; 237 stats = &mlxsw_sp_port->periodic_hw_stats.stats; 238 stats_base = &mlxsw_sp_qdisc->stats_base; 239 red_base = &mlxsw_sp_qdisc->xstats_base.red; 240 241 stats_base->tx_packets = stats->tx_packets; 242 stats_base->tx_bytes = stats->tx_bytes; 243 244 red_base->prob_mark = xstats->ecn; 245 red_base->prob_drop = xstats->wred_drop[tclass_num]; 246 red_base->pdrop = xstats->tail_drop[tclass_num]; 247 248 stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; 249 stats_base->drops = red_base->prob_drop + red_base->pdrop; 250 251 stats_base->backlog = 0; 252 } 253 254 static int 255 mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, 256 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) 257 { 258 return 
mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, 259 mlxsw_sp_qdisc->tclass_num); 260 } 261 262 static int 263 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port, 264 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 265 void *params) 266 { 267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 268 struct tc_red_qopt_offload_params *p = params; 269 270 if (p->min > p->max) { 271 dev_err(mlxsw_sp->bus_info->dev, 272 "spectrum: RED: min %u is bigger then max %u\n", p->min, 273 p->max); 274 return -EINVAL; 275 } 276 if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) { 277 dev_err(mlxsw_sp->bus_info->dev, 278 "spectrum: RED: max value %u is too big\n", p->max); 279 return -EINVAL; 280 } 281 if (p->min == 0 || p->max == 0) { 282 dev_err(mlxsw_sp->bus_info->dev, 283 "spectrum: RED: 0 value is illegal for min and max\n"); 284 return -EINVAL; 285 } 286 return 0; 287 } 288 289 static int 290 mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, 291 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 292 void *params) 293 { 294 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 295 struct tc_red_qopt_offload_params *p = params; 296 u8 tclass_num = mlxsw_sp_qdisc->tclass_num; 297 u32 min, max; 298 u64 prob; 299 300 /* calculate probability in percentage */ 301 prob = p->probability; 302 prob *= 100; 303 prob = DIV_ROUND_UP(prob, 1 << 16); 304 prob = DIV_ROUND_UP(prob, 1 << 16); 305 min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min); 306 max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max); 307 return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, 308 max, prob, p->is_ecn); 309 } 310 311 static void 312 mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, 313 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 314 void *params) 315 { 316 struct tc_red_qopt_offload_params *p = params; 317 u64 backlog; 318 319 backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, 320 mlxsw_sp_qdisc->stats_base.backlog); 321 p->qstats->backlog -= backlog; 322 } 323 
/* Report RED extended stats (early drops, ECN marks, tail drops) as deltas
 * against the stored base-line, then roll the base-line forward so the next
 * call reports fresh deltas only.
 */
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, marks, pdrops;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	/* NOTE(review): ecn is read without a tclass index, so it appears to
	 * be a per-port counter while wred_drop/tail_drop are per-tclass -
	 * confirm against the xstats definition.
	 */
	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	marks = xstats->ecn - xstats_base->prob_mark;
	pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;
	res->prob_mark += marks;

	/* Advance the base-line by the deltas just reported. */
	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	xstats_base->prob_mark += marks;
	return 0;
}

/* Report generic RED stats (bytes/packets/overlimits/drops/backlog) as
 * deltas against the base-line, then advance the base-line. Backlog is a
 * gauge, not a counter: only the change since last read is added to the
 * stack's accounting, converted from cells to bytes.
 */
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
	tx_packets = stats->tx_packets - stats_base->tx_packets;
	/* overlimits = WRED early drops + ECN marks; drops = WRED + tail. */
	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
		     stats_base->overlimits;
	drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
		stats_base->drops;
	backlog = xstats->backlog[tclass_num];

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->overlimits += overlimits;
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog +=
			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
					     backlog) -
			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
					     stats_base->backlog);

	/* Roll every base-line forward by the reported deltas; backlog base
	 * is replaced since it is a gauge.
	 */
	stats_base->backlog = backlog;
	stats_base->drops += drops;
	stats_base->overlimits += overlimits;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}

/* Traffic class every priority is mapped back to when a qdisc is removed. */
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

/* Entry point for RED offload requests from the TC subsystem. Only a root
 * qdisc is supported; non-REPLACE commands require that the currently
 * offloaded qdisc matches the request's handle and type.
 */
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_red_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	if (p->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;

	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_red,
					      &p->set);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_RED))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_RED_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_RED_XSTATS:
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
						 p->xstats);
	case TC_RED_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

/* Map every priority back to the default traffic class. */
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);

	return 0;
}

/* Only as many bands as there are hardware traffic classes can be offloaded. */
static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	if (p->bands > IEEE_8021QAZ_MAX_TCS)
		return -EOPNOTSUPP;

	return 0;
}

/* Program the priority -> traffic class mapping from the prio qdisc's
 * priomap, inverting band order via MLXSW_SP_PRIO_BAND_TO_TCLASS.
 */
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	int tclass, i;
	int err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
		if (err)
			return err;
	}

	return 0;
}

/* Subtract the backlog we reported from the stack's accounting now that the
 * qdisc stops being offloaded.
 */
static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	p->qstats->backlog -= backlog;
}

/* Report prio stats as deltas against the base-line, aggregating tail drops
 * and backlog over all traffic classes, then advance the base-line.
 */
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
	tx_packets = stats->tx_packets - stats_base->tx_packets;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		drops += xstats->tail_drop[i];
		backlog += xstats->backlog[i];
	}
	drops = drops - stats_base->drops;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	/* Backlog is a gauge: only the change since last read is added,
	 * converted from cells to bytes.
	 */
	stats_ptr->qstats->backlog +=
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     backlog) -
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     stats_base->backlog);
	stats_base->backlog = backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}

/* Snapshot the current hardware counters as the prio stats base-line. */
static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	stats_base->tx_packets = stats->tx_packets;
	stats_base->tx_bytes = stats->tx_bytes;

	/* Tail drops are summed over all traffic classes, matching how
	 * get_prio_stats aggregates them.
	 */
	stats_base->drops = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		stats_base->drops += xstats->tail_drop[i];

	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Entry point for PRIO offload requests from the TC subsystem. Only a root
 * qdisc is supported; non-REPLACE commands require that the currently
 * offloaded qdisc matches the request's handle and type.
 */
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	if (p->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_PRIO))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

/* Allocate the per-port root qdisc slot; called on port creation. */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
					    GFP_KERNEL);
	if (!mlxsw_sp_port->root_qdisc)
		return -ENOMEM;

	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;

	return 0;
}

/* Free the per-port root qdisc slot; called on port removal. */
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->root_qdisc);
}