1 /* 2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */
#include <linux/device.h>
#include <linux/netdevice.h>
#include "en.h"
#include "en/port.h"
#include "en/port_buffer.h"

#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */

#define MLX5E_100MB (100000)
#define MLX5E_1GB   (1000000)

#define MLX5E_CEE_STATE_UP    1
#define MLX5E_CEE_STATE_DOWN  0

/* Max supported cable length is 1000 meters */
#define MLX5E_MAX_CABLE_LENGTH 1000

enum {
	/* HW TC group used for TCs reported as TSA_VENDOR (100% BW) */
	MLX5E_VENDOR_TC_GROUP_NUM = 7,
	/* HW TC group shared by all ETS TCs */
	MLX5E_LOWEST_PRIO_GROUP   = 0,
};

/* Return codes expected by the dcbnl CEE "setall" callback */
enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

/* DSCP trust/mapping requires the QCAM register plus QPTS and QPDPM support */
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
				   MLX5_CAP_QCAM_REG(mdev, qpdpm))

static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);

/* If dcbx mode is non-host set the dcbx mode to host.
 */
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
				     enum mlx5_dcbx_oper_mode mode)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
	int err;

	/* Read-modify-write of the DCBX_PARAM register */
	err = mlx5_query_port_dcbx_param(mdev, param);
	if (err)
		return err;

	MLX5_SET(dcbx_param, param, version_admin, mode);
	/* When leaving host mode, also mark the port as "willing" */
	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
		MLX5_SET(dcbx_param, param, willing_admin, 1);

	return mlx5_set_port_dcbx_param(mdev, param);
}

/* Move DCBX control to the host (driver) if FW supports it and it is not
 * already in host mode. Returns 0 on success or when nothing to do.
 */
static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
{
	struct mlx5e_dcbx *dcbx = &priv->dcbx;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
		return 0;

	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
		return 0;

	err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
	if (err)
		return err;

	dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
	return 0;
}

/* dcbnl ieee_getets callback: build the IEEE ETS view (prio->tc mapping,
 * per-TC BW and TSA) from the FW port configuration.
 */
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	bool is_tc_group_6_exist = false;
	bool is_zero_bw_ets_tc = false;
	int err = 0;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
		if (err)
			return err;
	}

	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets->ets_cap; i++) {
		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
		if (err)
			return err;

		/* Group LOWEST_PRIO_GROUP+1 with partial BW indicates that some
		 * ETS TCs were configured with 0% BW (see setets path below).
		 */
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
			is_zero_bw_ets_tc = true;

		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
			is_tc_group_6_exist = true;
	}

	/* Report 0%-BW ETS TCs if any exist */
	if (is_zero_bw_ets_tc) {
		for (i = 0; i < ets->ets_cap; i++)
			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
				ets->tc_tx_bw[i] = 0;
	}

	/* Update tc_tsa based on the FW setting */
	for (i = 0; i < ets->ets_cap; i++) {
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
			 !is_tc_group_6_exist)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
	}
	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));

	return err;
}

/* Translate the IEEE ETS request into HW TC groups: vendor TCs get group 7,
 * strict TCs get ascending groups above the ETS group(s), ETS TCs share the
 * lowest group(s).
 */
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
	bool any_tc_mapped_to_ets = false;
	bool ets_zero_bw = false;
	int strict_group;
	int i;

	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
			any_tc_mapped_to_ets = true;
			if (!ets->tc_tx_bw[i])
				ets_zero_bw = true;
		}
	}

	/* strict group
has higher priority than ets group */
	strict_group = MLX5E_LOWEST_PRIO_GROUP;
	if (any_tc_mapped_to_ets)
		strict_group++;
	/* If some ETS TCs have 0% BW, they occupy one more low group */
	if (ets_zero_bw)
		strict_group++;

	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			tc_group[i] = strict_group++;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
			/* Non-zero-BW ETS TCs go to group #1 when 0%-BW
			 * ETS TCs exist (those stay in group #0).
			 */
			if (ets->tc_tx_bw[i] && ets_zero_bw)
				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
			break;
		}
	}
}

/* Compute the per-TC BW allocation written to HW. Vendor/strict TCs get 100%;
 * 0%-BW ETS TCs split 100% equally within their own group (group #0).
 */
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
				 u8 *tc_group, int max_tc)
{
	int bw_for_ets_zero_bw_tc = 0;
	int last_ets_zero_bw_tc = -1;
	int num_ets_zero_bw = 0;
	int i;

	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
		    !ets->tc_tx_bw[i]) {
			num_ets_zero_bw++;
			last_ets_zero_bw_tc = i;
		}
	}

	if (num_ets_zero_bw)
		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;

	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
				      ets->tc_tx_bw[i] :
				      bw_for_ets_zero_bw_tc;
			break;
		}
	}

	/* Make sure the total bw for ets zero bw group is 100% */
	if (last_ets_zero_bw_tc != -1)
		tc_tx_bw[last_ets_zero_bw_tc] +=
			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}

/* If there are ETS BW 0,
 * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
 * Set group #0 to all the ETS BW 0 tcs and
 * equally splits the 100% BW between them
 * Report both group #0 and #1 as ETS type.
 * All the tcs in group #0 will be reported with 0% BW.
 */
static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	int max_tc = mlx5_max_tc(mdev);
	int err, i;

	mlx5e_build_tc_group(ets, tc_group, max_tc);
	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);

	/* Program prio->tc mapping, then TC groups, then per-TC BW */
	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
	if (err)
		return err;

	err = mlx5_set_port_tc_group(mdev, tc_group);
	if (err)
		return err;

	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);

	if (err)
		return err;

	/* Cache the requested TSA so getets can report it back */
	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
			  __func__, i, ets->prio_tc[i]);
		mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
			  __func__, i, tc_tx_bw[i], tc_group[i]);
	}

	return err;
}

/* Validate an ETS request: each priority must map to a valid TC, and the BW
 * of all ETS TCs must sum to 100% (a zero sum is tolerated only when
 * zero_sum_allowed, used by the CEE path).
 */
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
				    struct ieee_ets *ets,
				    bool zero_sum_allowed)
{
	bool have_ets_tc = false;
	int bw_sum = 0;
	int i;

	/* Validate Priority */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
			netdev_err(netdev,
				   "Failed to validate ETS: priority value greater than max(%d)\n",
				   MLX5E_MAX_PRIORITY);
			return -EINVAL;
		}
	}

	/* Validate Bandwidth Sum */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
			have_ets_tc = true;
			bw_sum += ets->tc_tx_bw[i];
		}
	}

	if (have_ets_tc && bw_sum != 100) {
		/* Stay silent for the tolerated zero-sum case; still -EINVAL */
		if (bw_sum || (!bw_sum && !zero_sum_allowed))
			netdev_err(netdev,
				   "Failed to validate ETS: BW sum is illegal\n");
		return -EINVAL;
	}
	return 0;
}

/* dcbnl ieee_setets callback: validate, then program the FW. */
static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct
mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	/* IEEE path does not allow a zero BW sum */
	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
	if (err)
		return err;

	err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
	if (err)
		return err;

	return 0;
}

/* dcbnl ieee_getpfc callback: report PFC capability, enable bitmap,
 * per-priority pause counters, and (when supported) the cable length
 * used for buffer/delay configuration.
 */
static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	int i;

	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
	}

	/* delay is (ab)used to carry the configured cable length */
	if (MLX5_BUFFER_SUPPORTED(mdev))
		pfc->delay = priv->dcbx.cable_len;

	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}

/* dcbnl ieee_setpfc callback: program the PFC enable bitmap (toggling the
 * link to apply it), update the cable length, and reconfigure the port
 * buffers when manually managed. On buffer-config failure the cable length
 * is rolled back.
 */
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 old_cable_len = priv->dcbx.cable_len;
	struct ieee_pfc pfc_new;
	u32 changed = 0;
	u8 curr_pfc_en;
	int ret = 0;

	/* pfc_en */
	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
	if (pfc->pfc_en != curr_pfc_en) {
		ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
		if (ret)
			return ret;
		/* PFC change takes effect after a link flap */
		mlx5_toggle_port_link(mdev);
		changed |= MLX5E_PORT_BUFFER_PFC;
	}

	/* delay carries the cable length; 0 or >= max is ignored */
	if (pfc->delay &&
	    pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
	    pfc->delay != priv->dcbx.cable_len) {
		priv->dcbx.cable_len = pfc->delay;
		changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
	}

	if (MLX5_BUFFER_SUPPORTED(mdev)) {
		pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ?
			pfc->pfc_en : curr_pfc_en;
		if (priv->dcbx.manual_buffer)
			ret = mlx5e_port_manual_buffer_config(priv, changed,
							      dev->mtu, &pfc_new,
							      NULL, NULL);

		/* Undo the cable-length update if buffer config failed */
		if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
			priv->dcbx.cable_len = old_cable_len;
	}

	if (!ret) {
		mlx5e_dbg(HW, priv,
			  "%s: PFC per priority bit mask: 0x%x\n",
			  __func__, pfc->pfc_en);
	}
	return ret;
}

/* dcbnl getdcbx callback: report the cached DCBX capability flags. */
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return priv->dcbx.cap;
}

/* dcbnl setdcbx callback. Returns 0 on success, 1 on failure (dcbnl
 * convention). mode==0 hands control back to FW (AUTO); otherwise the
 * mode must include host control and must not be LLD-managed.
 */
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_dcbx *dcbx = &priv->dcbx;

	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
			return 0;

		/* set dcbx to fw controlled */
		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
			dcbx->cap &= ~DCB_CAP_DCBX_HOST;
			return 0;
		}

		return 1;
	}

	if (!(mode & DCB_CAP_DCBX_HOST))
		return 1;

	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
		return 1;

	dcbx->cap = mode;

	return 0;
}

/* dcbnl ieee_setapp callback: install a DSCP->priority APP entry. Switches
 * the port to DSCP trust state on the first entry; reverts to PCP trust
 * if the FW mapping update fails.
 */
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct dcb_app temp;
	bool is_new;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Save the old entry info */
	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	temp.protocol = app->protocol;
	temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];

	/* Check if need to switch
to dscp trust state */
	if (!priv->dcbx.dscp_app_cnt) {
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
		if (err)
			return err;
	}

	/* Skip the fw command if new and old mapping are the same */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
		err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
		if (err)
			goto fw_err;
	}

	/* Delete the old entry if exists */
	is_new = false;
	err = dcb_ieee_delapp(dev, &temp);
	if (err)
		is_new = true;

	/* Add new entry and update counter */
	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	if (is_new)
		priv->dcbx.dscp_app_cnt++;

	return err;

fw_err:
	/* Roll back to PCP trust on FW failure */
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}

/* dcbnl ieee_delapp callback: remove a DSCP->priority APP entry, reset the
 * FW mapping for that DSCP to priority 0, and fall back to PCP trust state
 * when the last entry is removed.
 */
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Skip if no dscp app entry */
	if (!priv->dcbx.dscp_app_cnt)
		return -ENOENT;

	/* Check if the entry matches fw setting */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
		return -ENOENT;

	/* Delete the app entry */
	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	/* Reset the priority mapping back to zero */
	err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
	if (err)
		goto fw_err;

	priv->dcbx.dscp_app_cnt--;

	/* Check if need to switch to pcp trust state */
	if (!priv->dcbx.dscp_app_cnt)
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);

	return err;

fw_err:
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}

/* dcbnl ieee_getmaxrate callback: convert the FW per-TC rate limit
 * (value + unit) into bits-per-second-scaled tc_maxrate entries.
 */
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
				       struct ieee_maxrate *maxrate)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
	int err;
	int i;

	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
	if (err)
		return err;

	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));

	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
		switch (max_bw_unit[i]) {
		case MLX5_100_MBPS_UNIT:
			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
			break;
		case MLX5_GBPS_UNIT:
			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
			break;
		case MLX5_BW_NO_LIMIT:
			/* rate stays 0 == unlimited */
			break;
		default:
			WARN(true, "non-supported BW unit");
			break;
		}
	}

	return 0;
}

/* dcbnl ieee_setmaxrate callback: pick the 100Mbps unit for rates that fit
 * in an 8-bit value at that granularity, otherwise fall back to Gbps units.
 */
static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
				       struct ieee_maxrate *maxrate)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
	/* largest rate expressible as (8-bit value) * 100Mb, rounded to 1Gb */
	__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
	int i;

	memset(max_bw_value, 0, sizeof(max_bw_value));
	memset(max_bw_unit, 0, sizeof(max_bw_unit));

	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
		if (!maxrate->tc_maxrate[i]) {
			max_bw_unit[i] = MLX5_BW_NO_LIMIT;
			continue;
		}
		if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
						  MLX5E_100MB);
			/* round sub-100Mb requests up to one unit */
			max_bw_value[i] = max_bw_value[i] ?
max_bw_value[i] : 1;
			max_bw_unit[i] = MLX5_100_MBPS_UNIT;
		} else {
			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
						  MLX5E_1GB);
			max_bw_unit[i] = MLX5_GBPS_UNIT;
		}
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
			  __func__, i, max_bw_value[i]);
	}

	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}

/* dcbnl CEE setall callback: apply the accumulated CEE configuration
 * (priority groups as ETS, plus PFC). Returns MLX5_DCB_CHG_RESET on
 * success, MLX5_DCB_NO_CHG on any failure.
 */
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct ieee_ets ets;
	struct ieee_pfc pfc;
	int err = -EOPNOTSUPP;
	int i;

	if (!MLX5_CAP_GEN(mdev, ets))
		goto out;

	memset(&ets, 0, sizeof(ets));
	memset(&pfc, 0, sizeof(pfc));

	/* Translate the cached CEE priority-group config to an IEEE ETS view */
	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
		mlx5e_dbg(HW, priv,
			  "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
			  __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
			  ets.prio_tc[i]);
	}

	/* CEE may legitimately carry an all-zero BW table */
	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
	if (err)
		goto out;

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set ETS: %d\n", __func__, err);
		goto out;
	}

	/* Set PFC */
	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
	if (!cee_cfg->pfc_enable)
		pfc.pfc_en = 0;
	else
		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;

	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set PFC: %d\n", __func__, err);
		goto out;
	}
out:
	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}

/* dcbnl getstate callback: CEE state is always reported as enabled. */
static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
{
	return MLX5E_CEE_STATE_UP;
}

/* dcbnl getpermhwaddr callback: report the permanent MAC; the buffer is
 * pre-filled with 0xff in case the query leaves bytes untouched.
 */
static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
				      u8 *perm_addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!perm_addr)
		return;

	memset(perm_addr, 0xff, MAX_ADDR_LEN);

	mlx5_query_mac_address(priv->mdev, perm_addr);
}

/* dcbnl CEE setpgtccfgtx callback: cache the priority -> PG mapping; it is
 * applied later by setall. prio_type/bw_pct/up_map are not used.
 */
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
				     int priority, u8 prio_type,
				     u8 pgid, u8 bw_pct, u8 up_map)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	cee_cfg->prio_to_pg_map[priority] = pgid;
}

/* dcbnl CEE setpgbwgcfgtx callback: cache the PG bandwidth percentage;
 * applied later by setall.
 */
static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
				      int pgid, u8 bw_pct)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	cee_cfg->pg_bw_pct[pgid] = bw_pct;
}

/* dcbnl CEE getpgtccfgtx callback: report the FW prio->TC mapping in *pgid;
 * the remaining outputs are zeroed (not tracked per-priority).
 */
static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
				     int priority, u8 *prio_type,
				     u8 *pgid, u8 *bw_pct, u8 *up_map)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
		netdev_err(netdev, "%s, ets is not supported\n", __func__);
		return;
	}

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	*prio_type = 0;
	*bw_pct = 0;
	*up_map = 0;
	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
		*pgid = 0;
}

/* dcbnl CEE getpgbwgcfgtx callback: report a PG's TX BW% via the IEEE
 * getets path.
 */
static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
				      int pgid, u8 *bw_pct)
{
	struct ieee_ets ets;

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	/* NOTE(review): getets return value is ignored here; on failure
	 * ets.tc_tx_bw may be stale/uninitialized — matches existing behavior.
	 */
	mlx5e_dcbnl_ieee_getets(netdev, &ets);
	*bw_pct = ets.tc_tx_bw[pgid];
}

/* dcbnl CEE setpfccfg callback: cache a per-priority PFC setting (0/1);
 * applied later by setall.
 */
static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
				  int priority, u8 setting)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	if (setting > 1)
		return;

	cee_cfg->pfc_setting[priority] = setting;
}

/* Extract one priority's PFC-enable bit from the IEEE getpfc result. */
static int
mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
			     int priority, u8 *setting)
{
	struct ieee_pfc pfc;
	int err;

	err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);

	if (err)
		*setting = 0;
	else
		*setting = (pfc.pfc_en >> priority) & 0x01;

	return err;
}

/* dcbnl CEE getpfccfg callback: report the current PFC bit for a priority. */
static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
				  int priority, u8 *setting)
{
	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	if (!setting)
		return;

	mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
}

/* dcbnl getcap callback: report device DCB capabilities. Returns 0 on
 * success, 1 for unknown capability IDs (dcbnl convention).
 */
static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
			     int capid, u8 *cap)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 rval = 0;

	switch (capid) {
	case DCB_CAP_ATTR_PG:
		*cap = true;
		break;
	case DCB_CAP_ATTR_PFC:
		*cap = true;
		break;
	case DCB_CAP_ATTR_UP2TC:
		*cap = false;
		break;
	case DCB_CAP_ATTR_PG_TCS:
		/* bitmap-encoded TC count: bit N set means 2^N TCs */
		*cap = 1 << mlx5_max_tc(mdev);
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		*cap = 1 << mlx5_max_tc(mdev);
		break;
	case DCB_CAP_ATTR_GSP:
		*cap = false;
		break;
	case DCB_CAP_ATTR_BCN:
		*cap = false;
		break;
	case DCB_CAP_ATTR_DCBX:
		*cap = priv->dcbx.cap |
		       DCB_CAP_DCBX_VER_CEE |
		       DCB_CAP_DCBX_VER_IEEE;
		break;
	default:
		*cap = 0;
		rval = 1;
		break;
	}

	return rval;
}

/* dcbnl getnumtcs callback: report the number of TCs for PG/PFC. */
static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
				 int tcs_id, u8 *num)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	switch (tcs_id) {
	case DCB_NUMTCS_ATTR_PG:
	case DCB_NUMTCS_ATTR_PFC:
		*num = mlx5_max_tc(mdev) + 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* dcbnl CEE getpfcstate callback: UP iff any PFC bit is enabled. */
static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
{
	struct ieee_pfc pfc;

	if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
		return MLX5E_CEE_STATE_DOWN;

	return pfc.pfc_en ?
MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
}

/* dcbnl CEE setpfcstate callback: cache the PFC enable flag; applied by
 * setall.
 */
static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
		return;

	cee_cfg->pfc_enable = state;
}

/* dcbnl_getbuffer callback: report the priority->buffer mapping, per-buffer
 * sizes, and total port buffer size.
 */
static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 buffer[MLX5E_MAX_PRIORITY];
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	err = mlx5e_port_query_priority2buffer(mdev, buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		dcb_buffer->prio2buffer[i] = buffer[i];

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_BUFFER; i++)
		dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
	dcb_buffer->total_size = port_buffer.port_buffer_size;

	return 0;
}

/* dcbnl_setbuffer callback: diff the requested prio->buffer mapping and
 * buffer sizes against the current FW state and apply only actual changes.
 * A successful change marks the port buffers as manually managed.
 */
static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
	u32 *buffer_size = NULL;
	u8 *prio2buffer = NULL;
	u32 changed = 0;
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
		mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);

	err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
		if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
			changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
			prio2buffer = dcb_buffer->prio2buffer;
			break;
		}
	}

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
		if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
			changed |= MLX5E_PORT_BUFFER_SIZE;
			buffer_size = dcb_buffer->buffer_size;
			break;
		}
	}

	if (!changed)
		return 0;

	/* From now on the driver (not FW auto-config) owns the buffer layout */
	priv->dcbx.manual_buffer = true;
	err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
					      buffer_size, prio2buffer);
	return err;
}

static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
	.ieee_getets	= mlx5e_dcbnl_ieee_getets,
	.ieee_setets	= mlx5e_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
	.ieee_getpfc	= mlx5e_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx5e_dcbnl_ieee_setpfc,
	.ieee_setapp	= mlx5e_dcbnl_ieee_setapp,
	.ieee_delapp	= mlx5e_dcbnl_ieee_delapp,
	.getdcbx	= mlx5e_dcbnl_getdcbx,
	.setdcbx	= mlx5e_dcbnl_setdcbx,
	.dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
	.dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,

	/* CEE interfaces */
	.setall         = mlx5e_dcbnl_setall,
	.getstate       = mlx5e_dcbnl_getstate,
	.getpermhwaddr  = mlx5e_dcbnl_getpermhwaddr,

	.setpgtccfgtx   = mlx5e_dcbnl_setpgtccfgtx,
	.setpgbwgcfgtx  = mlx5e_dcbnl_setpgbwgcfgtx,
	.getpgtccfgtx   = mlx5e_dcbnl_getpgtccfgtx,
	.getpgbwgcfgtx  = mlx5e_dcbnl_getpgbwgcfgtx,

	.setpfccfg      = mlx5e_dcbnl_setpfccfg,
	.getpfccfg      = mlx5e_dcbnl_getpfccfg,
	.getcap         = mlx5e_dcbnl_getcap,
	.getnumtcs      = mlx5e_dcbnl_getnumtcs,
	.getpfcstate    = mlx5e_dcbnl_getpfcstate,
	.setpfcstate    = mlx5e_dcbnl_setpfcstate,
};

/* Attach the DCBNL ops when the device supports QoS and is the vport
 * group manager.
 */
void mlx5e_dcbnl_build_netdev(struct
net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
}

/* Read the operational DCBX version from FW; anything that is not HOST is
 * normalized to AUTO (defaults to HOST if the query fails).
 */
static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
					enum mlx5_dcbx_oper_mode *mode)
{
	u32 out[MLX5_ST_SZ_DW(dcbx_param)];

	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;

	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
		*mode = MLX5_GET(dcbx_param, out, version_oper);

	/* From driver's point of view, we only care if the mode
	 * is host (HOST) or non-host (AUTO)
	 */
	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
}

/* Program the default ETS configuration: all TCs vendor type at 100% BW,
 * identity prio->tc mapping except priorities 0 and 1 which are swapped.
 */
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	struct ieee_ets ets;
	int err;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return;

	memset(&ets, 0, sizeof(ets));
	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets.ets_cap; i++) {
		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		ets.prio_tc[i] = i;
	}

	if (ets.ets_cap > 1) {
		/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
		ets.prio_tc[0] = 1;
		ets.prio_tc[1] = 0;
	}

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err)
		netdev_err(priv->netdev,
			   "%s, Failed to init ETS: %d\n", __func__, err);
}

/* Action selector for mlx5e_dcbnl_dscp_app() */
enum {
	INIT,
	DELETE,
};

/* Register (INIT) or unregister (DELETE) the full set of DSCP APP entries
 * with the dcbnl core, mirroring the cached dscp2prio table. Only relevant
 * while the port is in DSCP trust state.
 */
static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
{
	struct dcb_app temp;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return;

	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
		return;

	/* No SEL_DSCP entry in non DSCP state */
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
		return;

	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	for (i = 0; i < MLX5E_MAX_DSCP; i++) {
		temp.protocol = i;
		temp.priority = priv->dcbx_dp.dscp2prio[i];
		if (action == INIT)
			dcb_ieee_setapp(priv->netdev, &temp);
		else
			dcb_ieee_delapp(priv->netdev, &temp);
	}

	priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
}

void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, INIT);
}

void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, DELETE);
}

/* Derive the TX min-inline mode from FW, bumping L2 to IP when trusting
 * DSCP (the IP header must be inlined for DSCP-based classification).
 */
static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
						       struct mlx5e_params *params,
						       u8 trust_state)
{
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
	if (trust_state == MLX5_QPTS_TRUST_DSCP &&
	    params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
		params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}

/* mlx5e_safe_switch_params callback: commit the new trust state to FW and
 * then to the datapath-visible copy.
 */
static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
{
	u8 *trust_state = context;
	int err;

	err = mlx5_set_trust_state(priv->mdev, *trust_state);
	if (err)
		return err;
	WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);

	return 0;
}

/* Switch the port trust state (PCP/DSCP), recreating the channels only when
 * the resulting tx_min_inline mode actually changes.
 */
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
	struct mlx5e_params new_params;
	bool reset = true;
	int err;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
						   trust_state);

	/* Skip if tx_min_inline is the same */
	if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
		reset = false;

	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_update_trust_state_hw,
				       &trust_state, reset);

	mutex_unlock(&priv->state_lock);

	return err;
}
1167 1168 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio) 1169 { 1170 int err; 1171 1172 err = mlx5_set_dscp2prio(priv->mdev, dscp, prio); 1173 if (err) 1174 return err; 1175 1176 priv->dcbx_dp.dscp2prio[dscp] = prio; 1177 return err; 1178 } 1179 1180 static int mlx5e_trust_initialize(struct mlx5e_priv *priv) 1181 { 1182 struct mlx5_core_dev *mdev = priv->mdev; 1183 u8 trust_state; 1184 int err; 1185 1186 if (!MLX5_DSCP_SUPPORTED(mdev)) { 1187 WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP); 1188 return 0; 1189 } 1190 1191 err = mlx5_query_trust_state(priv->mdev, &trust_state); 1192 if (err) 1193 return err; 1194 WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state); 1195 1196 if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) { 1197 /* 1198 * Align the driver state with the register state. 1199 * Temporary state change is required to enable the app list reset. 1200 */ 1201 priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP; 1202 mlx5e_dcbnl_delete_app(priv); 1203 priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; 1204 } 1205 1206 mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params, 1207 priv->dcbx_dp.trust_state); 1208 1209 err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio); 1210 if (err) 1211 return err; 1212 1213 return 0; 1214 } 1215 1216 #define MLX5E_BUFFER_CELL_SHIFT 7 1217 1218 static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv) 1219 { 1220 struct mlx5_core_dev *mdev = priv->mdev; 1221 u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {}; 1222 u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {}; 1223 1224 if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 1225 return (1 << MLX5E_BUFFER_CELL_SHIFT); 1226 1227 if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), 1228 MLX5_REG_SBCAM, 0, 0)) 1229 return (1 << MLX5E_BUFFER_CELL_SHIFT); 1230 1231 return MLX5_GET(sbcam_reg, out, cap_cell_size); 1232 } 1233 1234 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) 1235 { 1236 
struct mlx5e_dcbx *dcbx = &priv->dcbx; 1237 1238 mlx5e_trust_initialize(priv); 1239 1240 if (!MLX5_CAP_GEN(priv->mdev, qos)) 1241 return; 1242 1243 if (MLX5_CAP_GEN(priv->mdev, dcbx)) 1244 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode); 1245 1246 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE | 1247 DCB_CAP_DCBX_VER_IEEE; 1248 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) 1249 priv->dcbx.cap |= DCB_CAP_DCBX_HOST; 1250 1251 priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv); 1252 priv->dcbx.manual_buffer = false; 1253 priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; 1254 1255 mlx5e_ets_init(priv); 1256 } 1257