1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 *
 */

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/in.h>
#include <net/ip.h>

#include "mlx4_en.h"
#include "en_port.h"

/* NOTE(review): these masks/flags are consumed by flow-steering code not
 * visible in this chunk — verify usage against the rest of the file.
 */
#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)

/* Push the current interrupt moderation parameters (priv->tx/rx_usecs,
 * priv->tx/rx_frames) down to every TX and RX completion queue.  RX CQs
 * are left alone when adaptive RX coalescing is enabled, since the
 * adaptive algorithm reprograms them itself.  Returns 0 or the first
 * error from mlx4_en_set_cq_moder().
 */
static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
	int i;
	int err = 0;

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
		priv->tx_cq[i]->moder_time = priv->tx_usecs;
		/* CQ moderation is only written to HW while the port is up */
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
			if (err)
				return err;
		}
	}

	if (priv->adaptive_rx_coal)
		return 0;

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
		priv->rx_cq[i]->moder_time = priv->rx_usecs;
		/* Force the adaptive algorithm to resample this ring */
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
			if (err)
				return err;
		}
	}

	return err;
}

/* ethtool -i: driver name/version, firmware version and PCI bus address */
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
		sizeof(drvinfo->version));
	/* FW version is packed major.minor.subminor into one 64-bit word */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		"%d.%d.%d",
		(u16) (mdev->dev->caps.fw_ver >> 32),
		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
		(u16) (mdev->dev->caps.fw_ver & 0xffff));
	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

/* Names reported for the ETH_SS_PRIV_FLAGS string set */
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
	"blueflame",
};

/* Statistics names, in the exact order the counters are copied out in
 * mlx4_en_get_ethtool_stats(): NUM_MAIN_STATS netdev counters first,
 * then port statistics, then per-priority packet statistics.
 */
static const char main_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"xmit_more",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",

	/* packet statistics */
	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
	"tx_prio_6", "tx_prio_7",
};
/* Count of the netdev-style counters at the head of main_strings[] */
#define NUM_MAIN_STATS 21
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)

/* ETH_SS_TEST names; the last two entries (Register/Loopback) are only
 * reported when the device supports UC loopback (see get_sset_count /
 * get_strings below).
 */
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};

/* ethtool msglvl: plain accessors for priv->msg_enable */
static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}

static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
}

/* ethtool -s/--show wol: report Wake-on-LAN capability and current state
 * read from firmware via mlx4_wol_read().  Only magic-packet wake is
 * supported, and only on physical ports 1 and 2.
 */
static void mlx4_en_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	int err = 0;
	u64 config = 0;
	u64 mask;

	/* WoL capability flags exist only for ports 1 and 2 */
	if ((priv->port < 1) || (priv->port > 2)) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask)) {
		wol->supported = 0;
		wol->wolopts = 0;
		return;
	}

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	if (config & MLX4_EN_WOL_MAGIC)
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;

	if (config & MLX4_EN_WOL_ENABLED)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
}

/* ethtool -s wol: enable/disable magic-packet wake via a firmware
 * read-modify-write of the WoL configuration word.
 */
static int mlx4_en_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	u64 config = 0;
	int err = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2))
		return -EOPNOTSUPP;

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask))
		return -EOPNOTSUPP;

	/* NOTE(review): this tests wol->supported, not wol->wolopts —
	 * looks odd for a set path, but matches the historic behavior;
	 * confirm before changing.
	 */
	if (wol->supported & ~WAKE_MAGIC)
		return -EINVAL;

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL info, unable to modify\n");
		return err;
	}

	if (wol->wolopts & WAKE_MAGIC) {
		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
			MLX4_EN_WOL_MAGIC;
	} else {
		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
		config |= MLX4_EN_WOL_DO_MODIFY;
	}

	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
	if (err)
		en_err(priv, "Failed to set WoL information\n");

	return err;
}

/* Number of entries in each ethtool string set; must stay in sync with
 * mlx4_en_get_strings() and mlx4_en_get_ethtool_stats() below.
 */
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int bit_count = hweight64(priv->stats_bitmap);

	switch (sset) {
	case ETH_SS_STATS:
		/* When a stats bitmap is set, only the selected counters
		 * are reported; otherwise all of them are.
		 */
		return (priv->stats_bitmap ?
bit_count : NUM_ALL_STATS) +
			(priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_RX_BUSY_POLL
			/* per-RX-ring: packets, bytes, yields, misses, cleaned */
			(priv->rx_ring_num * 5);
#else
			/* per-RX-ring: packets, bytes */
			(priv->rx_ring_num * 2);
#endif
	case ETH_SS_TEST:
		/* The last two self-tests are dropped when the device has
		 * no UC loopback capability (see mlx4_en_get_strings()).
		 */
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(mlx4_en_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool -S: copy counters into @data in the order announced by
 * main_strings[] / get_strings().  When priv->stats_bitmap is set, only
 * the selected main/port counters are copied (and pkstats are skipped);
 * per-ring TX/RX counters are always appended at the end.
 */
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, j = 0;

	/* stats are updated from softirq context; block it while copying */
	spin_lock_bh(&priv->stats_lock);

	if (!(priv->stats_bitmap)) {
		for (i = 0; i < NUM_MAIN_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->stats)[i];
		for (i = 0; i < NUM_PORT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->port_stats)[i];
		for (i = 0; i < NUM_PKT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->pkstats)[i];
	} else {
		/* j walks the bitmap across both counter groups */
		for (i = 0; i < NUM_MAIN_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->stats)[i];
			j++;
		}
		for (i = 0; i < NUM_PORT_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->port_stats)[i];
			j++;
		}
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i]->packets;
		data[index++] = priv->tx_ring[i]->bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
		data[index++] = priv->rx_ring[i]->yields;
		data[index++] = priv->rx_ring[i]->misses;
		data[index++] = priv->rx_ring[i]->cleaned;
#endif
	}
	spin_unlock_bh(&priv->stats_lock);

}

/* ethtool -t: delegate to the driver's self-test implementation */
static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}

/* Emit the string names for each ethtool string set, mirroring the
 * counts returned by mlx4_en_get_sset_count() and the copy order used
 * by mlx4_en_get_ethtool_stats().
 */
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		/* Register/Loopback tests only exist with UC loopback */
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		if (!priv->stats_bitmap) {
			for (i = 0; i < NUM_MAIN_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i]);
			for (i = 0; i < NUM_PORT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i +
					NUM_MAIN_STATS]);
			for (i = 0; i < NUM_PKT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i +
					NUM_MAIN_STATS +
					NUM_PORT_STATS]);
		} else
			for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
				if ((priv->stats_bitmap >> i) & 1) {
					strcpy(data +
					       (index++) * ETH_GSTRING_LEN,
					       main_strings[i]);
				}
				/* no higher bits set — nothing more to name */
				if (!(priv->stats_bitmap >> i))
					break;
			}
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
#ifdef CONFIG_NET_RX_BUSY_POLL
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_napi_yield", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_misses", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_cleaned", i);
#endif
		}
		break;
	case
ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       mlx4_en_priv_flags[i]);
		break;

	}
}

/* Report AUTONEG_ENABLE only when the device can report backplane
 * autoneg and the port currently has autoneg enabled (ANE flag).
 */
static u32 mlx4_en_autoneg_get(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 autoneg = AUTONEG_DISABLE;

	if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
	    (priv->port_state.flags & MLX4_EN_PORT_ANE))
		autoneg = AUTONEG_ENABLE;

	return autoneg;
}

/* Map the PTYS capability mask to an ethtool SUPPORTED_* port type:
 * twisted pair, fibre, or backplane (first matching group wins).
 */
static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return SUPPORTED_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return SUPPORTED_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return SUPPORTED_Backplane;
	}
	return 0;
}

/* Map the currently operating (or, if link is down, the capable) PTYS
 * protocol mask to an ethtool PORT_* connector type.
 */
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);

	if (!eth_proto) /* link down */
		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return PORT_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return PORT_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
		return PORT_DA;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return PORT_NONE;
	}
	return PORT_OTHER;
}

/* One table row per bit of the PTYS eth_proto_cap field */
#define MLX4_LINK_MODES_SZ \
	(FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)

/* Column selector for ptys2ethtool_map[] below */
enum ethtool_report {
	SUPPORTED = 0,
	ADVERTISED = 1,
	SPEED = 2
};

/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
	[MLX4_100BASE_TX] = {
		SUPPORTED_100baseT_Full,
		ADVERTISED_100baseT_Full,
		SPEED_100
	},

	[MLX4_1000BASE_T] = {
		SUPPORTED_1000baseT_Full,
		ADVERTISED_1000baseT_Full,
		SPEED_1000
	},
	[MLX4_1000BASE_CX_SGMII] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
	},
	[MLX4_1000BASE_KX] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
	},

	[MLX4_10GBASE_T] = {
		SUPPORTED_10000baseT_Full,
		ADVERTISED_10000baseT_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_CX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_KX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_KR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_CR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_SR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
SPEED_10000
	},

	[MLX4_20GBASE_KR2] = {
		SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
		ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
		SPEED_20000
	},

	[MLX4_40GBASE_CR4] = {
		SUPPORTED_40000baseCR4_Full,
		ADVERTISED_40000baseCR4_Full,
		SPEED_40000
	},
	[MLX4_40GBASE_KR4] = {
		SUPPORTED_40000baseKR4_Full,
		ADVERTISED_40000baseKR4_Full,
		SPEED_40000
	},
	[MLX4_40GBASE_SR4] = {
		SUPPORTED_40000baseSR4_Full,
		ADVERTISED_40000baseSR4_Full,
		SPEED_40000
	},

	[MLX4_56GBASE_KR4] = {
		SUPPORTED_56000baseKR4_Full,
		ADVERTISED_56000baseKR4_Full,
		SPEED_56000
	},
	[MLX4_56GBASE_CR4] = {
		SUPPORTED_56000baseCR4_Full,
		ADVERTISED_56000baseCR4_Full,
		SPEED_56000
	},
	[MLX4_56GBASE_SR4] = {
		SUPPORTED_56000baseSR4_Full,
		ADVERTISED_56000baseSR4_Full,
		SPEED_56000
	},
};

/* OR together the ethtool words from the selected map column for every
 * mlx4 protocol bit set in @eth_proto.
 */
static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
{
	int i;
	u32 link_modes = 0;

	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (eth_proto & MLX4_PROT_MASK(i))
			link_modes |= ptys2ethtool_map[i][report];
	}
	return link_modes;
}

/* Inverse of the above: build an mlx4 protocol bitmask from ethtool
 * link-mode bits.
 */
static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
{
	int i;
	u32 ptys_modes = 0;

	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (ptys2ethtool_map[i][report] & link_modes)
			ptys_modes |= 1 << i;
	}
	return ptys_modes;
}

/* Convert actual speed (SPEED_XXX) to ptys link modes */
static u32 speed2ptys_link_modes(u32 speed)
{
	int i;
	u32 ptys_modes = 0;

	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (ptys2ethtool_map[i][SPEED] == speed)
			ptys_modes |= 1 << i;
	}
	return ptys_modes;
}

/* Fill @cmd from a PTYS register query: supported/advertised link modes,
 * port/transceiver type, pause advertisement, autoneg and link-partner
 * advertisement.  Returns non-zero if the PTYS query itself failed.
 */
static int ethtool_get_ptys_settings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	u32 eth_proto;
	int ret;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
			ret);
		return ret;
	}
	en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
	       ptys_reg.proto_mask);
	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_cap));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_admin));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_oper));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));

	cmd->supported = 0;
	cmd->advertising = 0;

	cmd->supported |= ptys_get_supported_port(&ptys_reg);

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
	cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
	cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;

	/* Asymmetric pause is advertised when tx and rx pause differ */
	cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
		ADVERTISED_Asym_Pause : 0;

	cmd->port = ptys_get_active_port(&ptys_reg);
	cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	if (mlx4_en_autoneg_get(dev)) {
		cmd->supported |= SUPPORTED_Autoneg;
		cmd->advertising |= ADVERTISED_Autoneg;
	}

	cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
	cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
			ADVERTISED_Autoneg : 0;

	cmd->phy_address = 0;
	cmd->mdio_support = 0;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;

	return ret;
}

/* Fallback for devices without ETH protocol control: report fixed
 * 10GBASE-T settings and guess the port type from the transceiver code
 * cached in port_state.
 */
static void ethtool_get_default_settings(struct net_device *dev,
					 struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;

	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	trans_type = priv->port_state.transceiver;

	if (trans_type > 0 && trans_type <= 0xC) {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else if (trans_type == 0x80 || trans_type == 0) {
		cmd->port = PORT_TP;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->supported |= SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
	} else {
		cmd->port = -1;
		cmd->transceiver = -1;
	}
}

/* ethtool get_settings entry point: refresh port state from firmware,
 * then fill @cmd via PTYS when supported, falling back to defaults.
 */
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = -EINVAL;

	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
	       priv->port_state.flags & MLX4_EN_PORT_ANC,
	       priv->port_state.flags & MLX4_EN_PORT_ANE);

	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
		ret = ethtool_get_ptys_settings(dev, cmd);
	if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
		ethtool_get_default_settings(dev, cmd);

	if
(netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
		cmd->duplex = DUPLEX_FULL;
	} else {
		/* no carrier: speed/duplex are unknown */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}

/* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
				   __be32 proto_cap)
{
	__be32 proto_admin = 0;

	if (!speed) { /* Speed = 0 ==> Reset Link modes */
		proto_admin = proto_cap;
		en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
			be32_to_cpu(proto_cap));
	} else {
		u32 ptys_link_modes = speed2ptys_link_modes(speed);

		/* only admit modes the device is actually capable of */
		proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
		en_info(priv, "Setting Speed to %d\n", speed);
	}
	return proto_admin;
}

/* ethtool set_settings: translate the requested advertising mask (with
 * autoneg) or fixed speed into a PTYS admin mask, write it back, and
 * restart the port if it was up so the new link mode takes effect.
 */
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	__be32 proto_admin;
	int ret;

	u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
	int speed = ethtool_cmd_speed(cmd);

	en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
	       speed, cmd->advertising, cmd->autoneg, cmd->duplex);

	/* half duplex is never supported; neither is setting link modes
	 * without ETH protocol control
	 */
	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
	    (cmd->duplex == DUPLEX_HALF))
		return -EINVAL;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		/* NOTE(review): query failure is reported as success here
		 * (only a warning is logged) — confirm this is intentional.
		 */
		en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
			ret);
		return 0;
	}

	proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
		cpu_to_be32(ptys_adv) :
		speed_set_ptys_admin(priv, speed,
				     ptys_reg.eth_proto_cap);

	proto_admin &= ptys_reg.eth_proto_cap;
	if (!proto_admin) {
		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
		return -EINVAL; /* nothing to change due to bad input */
	}

	if (proto_admin == ptys_reg.eth_proto_admin)
		return 0; /* Nothing to change */

	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
	       be32_to_cpu(proto_admin));

	ptys_reg.eth_proto_admin = proto_admin;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
				   &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
			be32_to_cpu(ptys_reg.eth_proto_admin), ret);
		return ret;
	}

	mutex_lock(&priv->mdev->state_lock);
	if (priv->port_up) {
		en_warn(priv, "Port link mode changed, restarting port...\n");
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&priv->mdev->state_lock);
	return 0;
}

/* ethtool -c: report the currently configured moderation parameters */
static int mlx4_en_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	coal->tx_coalesce_usecs = priv->tx_usecs;
	coal->tx_max_coalesced_frames = priv->tx_frames;
	coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;

	coal->rx_coalesce_usecs = priv->rx_usecs;
	coal->rx_max_coalesced_frames = priv->rx_frames;

	coal->pkt_rate_low = priv->pkt_rate_low;
	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
	coal->pkt_rate_high = priv->pkt_rate_high;
	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
	coal->rate_sample_interval = priv->sample_interval;
	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;

	return 0;
}

/* ethtool -C: store new moderation parameters and push them to HW */
static int mlx4_en_set_coalesce(struct
net_device *dev,
			       struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* tx_work_limit of 0 would stall TX completion processing */
	if (!coal->tx_max_coalesced_frames_irq)
		return -EINVAL;

	/* MLX4_EN_AUTO_CONF means "use the driver default" */
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
	}

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;

	return mlx4_en_moderation_update(priv);
}

/* ethtool -A: update pause frame settings in the port profile and
 * program them via SET_PORT.  Pause autoneg is not supported.
 */
static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (pause->autoneg)
		return -EINVAL;

	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");

	return err;
}

/* ethtool -a: report the pause settings stored in the port profile */
static void mlx4_en_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	pause->tx_pause = priv->prof->tx_pause;
	pause->rx_pause = priv->prof->rx_pause;
}

/* ethtool -G: resize TX/RX rings.  Sizes are rounded up to a power of
 * two and clamped; the port is stopped, resources reallocated, and the
 * port restarted if it was up.
 */
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;

	/* jumbo/mini rings do not exist on this hardware */
	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	/* nothing to do if the effective sizes are unchanged */
	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
					priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* freshly allocated CQs need the moderation settings re-applied */
	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

/* ethtool -g: report current and maximum ring sizes */
static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	memset(param, 0, sizeof(*param));
	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
	param->rx_pending = priv->port_up ?
		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
	param->tx_pending = priv->tx_ring[0]->size;
}

/* RSS indirection table has one entry per RX ring */
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->rx_ring_num;
}

static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
{
	return MLX4_EN_RSS_KEY_SIZE;
}

/* Validate and record the requested RSS hash function.  Warns about
 * hash-function/RXHASH combinations known to behave suboptimally.
 */
static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* check if requested function is supported by the device */
	if ((hfunc == ETH_RSS_HASH_TOP &&
	     !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
	    (hfunc == ETH_RSS_HASH_XOR &&
	     !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
		return -EINVAL;

	priv->rss_hash_fn = hfunc;
	if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
		en_warn(priv,
			"Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
	if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
		en_warn(priv,
			"Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
	return 0;
}

/* ethtool -x: report indirection table (ring index per entry), hash key
 * and hash function.  All output pointers are optional.
 */
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
			    u8 *hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int rss_rings;
	size_t n = priv->rx_ring_num;
	int err = 0;

	/* round the active ring count down to a power of two */
	rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
	rss_rings = 1 << ilog2(rss_rings);

	while (n--) {
		if (!ring_index)
			break;
		ring_index[n] = rss_map->qps[n % rss_rings].qpn -
			rss_map->base_qpn;
	}
	if (key)
		memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc)
		*hfunc = priv->rss_hash_fn;
	return err;
}

/* ethtool -X: set indirection table / key / hash function */
static int mlx4_en_set_rxfh(struct net_device *dev, const
u32 *ring_index,
			    const u8 *key, const u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (!ring_index)
			continue;
		/* first wrap back to ring 0 marks the table period */
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = priv->rx_ring_num;

	/* RSS table size must be an order of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		err = mlx4_en_check_rxfh_func(dev, hfunc);
		if (err)
			return err;
	}

	/* applying the new config requires a port restart */
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	if (ring_index)
		priv->prof->rss_rings = rss_rings;
	if (key)
		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}

/* true iff every bit of @field is clear or every bit is set */
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

/* Sanity-check an ethtool flow-steering rule before it is translated
 * into hardware rule specs: only exact-match (all-ones) or wildcard
 * (all-zeros) masks are accepted, and only TCP4/UDP4/IP4/ETHER flows.
 */
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* VLAN ID mask must be either fully wildcard or exact */
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;

		if (cmd->fs.m_ext.vlan_tci) {
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

		}
	}

	return 0;
}

/* Build an L2 (ethernet dest MAC, optionally VLAN) spec for @mac and
 * append it to @rule_list_h.  @spec_l2 is caller-allocated.
 */
static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
					struct list_head *rule_list_h,
					struct mlx4_spec_list *spec_l2,
					unsigned char *mac)
{
	int err = 0;
	/* 48-bit MAC mask, left-aligned in a big-endian 64-bit word */
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
	memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);

	if ((cmd->fs.flow_type & FLOW_EXT) &&
	    (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
		spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
	}

	list_add_tail(&spec_l2->list, rule_list_h);

	return err;
}

/* Derive the MAC for an L2 spec from an IPv4 destination: multicast
 * IPs map to their multicast MAC; unicast uses the user-supplied MAC
 * (FLOW_MAC_EXT) or the device's own address.  Requires CONFIG_INET.
 */
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
						struct ethtool_rxnfc *cmd,
						struct list_head *rule_list_h,
						struct mlx4_spec_list *spec_l2,
						__be32 ipv4_dst)
{
#ifdef CONFIG_INET
	unsigned char mac[ETH_ALEN];

	if (!ipv4_is_multicast(ipv4_dst)) {
		if (cmd->fs.flow_type & FLOW_MAC_EXT)
			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
		else
			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
	} else {
		ip_eth_mc_map(ipv4_dst, mac);
	}

	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
	return -EINVAL;
#endif
}

/* Translate an IP_USER_FLOW rule into L2+L3 spec entries on @list_h.
 * Both specs are freed on the error path (free_spec label, beyond this
 * chunk).
 */
static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}

	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.
1223 usr_ip4_spec.ip4dst); 1224 if (err) 1225 goto free_spec; 1226 spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; 1227 spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src; 1228 if (l3_mask->ip4src) 1229 spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; 1230 spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst; 1231 if (l3_mask->ip4dst) 1232 spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; 1233 list_add_tail(&spec_l3->list, list_h); 1234 1235 return 0; 1236 1237 free_spec: 1238 kfree(spec_l2); 1239 kfree(spec_l3); 1240 return err; 1241 } 1242 1243 static int add_tcp_udp_rule(struct mlx4_en_priv *priv, 1244 struct ethtool_rxnfc *cmd, 1245 struct list_head *list_h, int proto) 1246 { 1247 int err; 1248 struct mlx4_spec_list *spec_l2 = NULL; 1249 struct mlx4_spec_list *spec_l3 = NULL; 1250 struct mlx4_spec_list *spec_l4 = NULL; 1251 struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec; 1252 1253 spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); 1254 spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); 1255 spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL); 1256 if (!spec_l2 || !spec_l3 || !spec_l4) { 1257 err = -ENOMEM; 1258 goto free_spec; 1259 } 1260 1261 spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; 1262 1263 if (proto == TCP_V4_FLOW) { 1264 err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, 1265 spec_l2, 1266 cmd->fs.h_u. 1267 tcp_ip4_spec.ip4dst); 1268 if (err) 1269 goto free_spec; 1270 spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP; 1271 spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src; 1272 spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst; 1273 spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc; 1274 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst; 1275 } else { 1276 err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, 1277 spec_l2, 1278 cmd->fs.h_u. 
1279 udp_ip4_spec.ip4dst); 1280 if (err) 1281 goto free_spec; 1282 spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP; 1283 spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src; 1284 spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst; 1285 spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc; 1286 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst; 1287 } 1288 1289 if (l4_mask->ip4src) 1290 spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; 1291 if (l4_mask->ip4dst) 1292 spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; 1293 1294 if (l4_mask->psrc) 1295 spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK; 1296 if (l4_mask->pdst) 1297 spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK; 1298 1299 list_add_tail(&spec_l3->list, list_h); 1300 list_add_tail(&spec_l4->list, list_h); 1301 1302 return 0; 1303 1304 free_spec: 1305 kfree(spec_l2); 1306 kfree(spec_l3); 1307 kfree(spec_l4); 1308 return err; 1309 } 1310 1311 static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev, 1312 struct ethtool_rxnfc *cmd, 1313 struct list_head *rule_list_h) 1314 { 1315 int err; 1316 struct ethhdr *eth_spec; 1317 struct mlx4_spec_list *spec_l2; 1318 struct mlx4_en_priv *priv = netdev_priv(dev); 1319 1320 err = mlx4_en_validate_flow(dev, cmd); 1321 if (err) 1322 return err; 1323 1324 switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 1325 case ETHER_FLOW: 1326 spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); 1327 if (!spec_l2) 1328 return -ENOMEM; 1329 1330 eth_spec = &cmd->fs.h_u.ether_spec; 1331 mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, 1332 ð_spec->h_dest[0]); 1333 spec_l2->eth.ether_type = eth_spec->h_proto; 1334 if (eth_spec->h_proto) 1335 spec_l2->eth.ether_type_enable = 1; 1336 break; 1337 case IP_USER_FLOW: 1338 err = add_ip_rule(priv, cmd, rule_list_h); 1339 break; 1340 case TCP_V4_FLOW: 1341 err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW); 1342 break; 1343 case UDP_V4_FLOW: 1344 err = add_tcp_udp_rule(priv, cmd, rule_list_h, 
UDP_V4_FLOW); 1345 break; 1346 } 1347 1348 return err; 1349 } 1350 1351 static int mlx4_en_flow_replace(struct net_device *dev, 1352 struct ethtool_rxnfc *cmd) 1353 { 1354 int err; 1355 struct mlx4_en_priv *priv = netdev_priv(dev); 1356 struct ethtool_flow_id *loc_rule; 1357 struct mlx4_spec_list *spec, *tmp_spec; 1358 u32 qpn; 1359 u64 reg_id; 1360 1361 struct mlx4_net_trans_rule rule = { 1362 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1363 .exclusive = 0, 1364 .allow_loopback = 1, 1365 .promisc_mode = MLX4_FS_REGULAR, 1366 }; 1367 1368 rule.port = priv->port; 1369 rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location; 1370 INIT_LIST_HEAD(&rule.list); 1371 1372 /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */ 1373 if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC) 1374 qpn = priv->drop_qp.qpn; 1375 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { 1376 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 1377 } else { 1378 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 1379 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", 1380 cmd->fs.ring_cookie); 1381 return -EINVAL; 1382 } 1383 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; 1384 if (!qpn) { 1385 en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n", 1386 cmd->fs.ring_cookie); 1387 return -EINVAL; 1388 } 1389 } 1390 rule.qpn = qpn; 1391 err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list); 1392 if (err) 1393 goto out_free_list; 1394 1395 loc_rule = &priv->ethtool_rules[cmd->fs.location]; 1396 if (loc_rule->id) { 1397 err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id); 1398 if (err) { 1399 en_err(priv, "Fail to detach network rule at location %d. 
registration id = %llx\n", 1400 cmd->fs.location, loc_rule->id); 1401 goto out_free_list; 1402 } 1403 loc_rule->id = 0; 1404 memset(&loc_rule->flow_spec, 0, 1405 sizeof(struct ethtool_rx_flow_spec)); 1406 list_del(&loc_rule->list); 1407 } 1408 err = mlx4_flow_attach(priv->mdev->dev, &rule, ®_id); 1409 if (err) { 1410 en_err(priv, "Fail to attach network rule at location %d\n", 1411 cmd->fs.location); 1412 goto out_free_list; 1413 } 1414 loc_rule->id = reg_id; 1415 memcpy(&loc_rule->flow_spec, &cmd->fs, 1416 sizeof(struct ethtool_rx_flow_spec)); 1417 list_add_tail(&loc_rule->list, &priv->ethtool_list); 1418 1419 out_free_list: 1420 list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) { 1421 list_del(&spec->list); 1422 kfree(spec); 1423 } 1424 return err; 1425 } 1426 1427 static int mlx4_en_flow_detach(struct net_device *dev, 1428 struct ethtool_rxnfc *cmd) 1429 { 1430 int err = 0; 1431 struct ethtool_flow_id *rule; 1432 struct mlx4_en_priv *priv = netdev_priv(dev); 1433 1434 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) 1435 return -EINVAL; 1436 1437 rule = &priv->ethtool_rules[cmd->fs.location]; 1438 if (!rule->id) { 1439 err = -ENOENT; 1440 goto out; 1441 } 1442 1443 err = mlx4_flow_detach(priv->mdev->dev, rule->id); 1444 if (err) { 1445 en_err(priv, "Fail to detach network rule at location %d. 
registration id = 0x%llx\n", 1446 cmd->fs.location, rule->id); 1447 goto out; 1448 } 1449 rule->id = 0; 1450 memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec)); 1451 list_del(&rule->list); 1452 out: 1453 return err; 1454 1455 } 1456 1457 static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, 1458 int loc) 1459 { 1460 int err = 0; 1461 struct ethtool_flow_id *rule; 1462 struct mlx4_en_priv *priv = netdev_priv(dev); 1463 1464 if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) 1465 return -EINVAL; 1466 1467 rule = &priv->ethtool_rules[loc]; 1468 if (rule->id) 1469 memcpy(&cmd->fs, &rule->flow_spec, 1470 sizeof(struct ethtool_rx_flow_spec)); 1471 else 1472 err = -ENOENT; 1473 1474 return err; 1475 } 1476 1477 static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv) 1478 { 1479 1480 int i, res = 0; 1481 for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { 1482 if (priv->ethtool_rules[i].id) 1483 res++; 1484 } 1485 return res; 1486 1487 } 1488 1489 static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 1490 u32 *rule_locs) 1491 { 1492 struct mlx4_en_priv *priv = netdev_priv(dev); 1493 struct mlx4_en_dev *mdev = priv->mdev; 1494 int err = 0; 1495 int i = 0, priority = 0; 1496 1497 if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT || 1498 cmd->cmd == ETHTOOL_GRXCLSRULE || 1499 cmd->cmd == ETHTOOL_GRXCLSRLALL) && 1500 (mdev->dev->caps.steering_mode != 1501 MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)) 1502 return -EINVAL; 1503 1504 switch (cmd->cmd) { 1505 case ETHTOOL_GRXRINGS: 1506 cmd->data = priv->rx_ring_num; 1507 break; 1508 case ETHTOOL_GRXCLSRLCNT: 1509 cmd->rule_cnt = mlx4_en_get_num_flows(priv); 1510 break; 1511 case ETHTOOL_GRXCLSRULE: 1512 err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); 1513 break; 1514 case ETHTOOL_GRXCLSRLALL: 1515 while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { 1516 err = mlx4_en_get_flow(dev, cmd, i); 1517 if (!err) 1518 rule_locs[priority++] = i; 1519 i++; 1520 } 1521 err = 0; 
1522 break; 1523 default: 1524 err = -EOPNOTSUPP; 1525 break; 1526 } 1527 1528 return err; 1529 } 1530 1531 static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 1532 { 1533 int err = 0; 1534 struct mlx4_en_priv *priv = netdev_priv(dev); 1535 struct mlx4_en_dev *mdev = priv->mdev; 1536 1537 if (mdev->dev->caps.steering_mode != 1538 MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up) 1539 return -EINVAL; 1540 1541 switch (cmd->cmd) { 1542 case ETHTOOL_SRXCLSRLINS: 1543 err = mlx4_en_flow_replace(dev, cmd); 1544 break; 1545 case ETHTOOL_SRXCLSRLDEL: 1546 err = mlx4_en_flow_detach(dev, cmd); 1547 break; 1548 default: 1549 en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd); 1550 return -EINVAL; 1551 } 1552 1553 return err; 1554 } 1555 1556 static void mlx4_en_get_channels(struct net_device *dev, 1557 struct ethtool_channels *channel) 1558 { 1559 struct mlx4_en_priv *priv = netdev_priv(dev); 1560 1561 memset(channel, 0, sizeof(*channel)); 1562 1563 channel->max_rx = MAX_RX_RINGS; 1564 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; 1565 1566 channel->rx_count = priv->rx_ring_num; 1567 channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP; 1568 } 1569 1570 static int mlx4_en_set_channels(struct net_device *dev, 1571 struct ethtool_channels *channel) 1572 { 1573 struct mlx4_en_priv *priv = netdev_priv(dev); 1574 struct mlx4_en_dev *mdev = priv->mdev; 1575 int port_up = 0; 1576 int err = 0; 1577 1578 if (channel->other_count || channel->combined_count || 1579 channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP || 1580 channel->rx_count > MAX_RX_RINGS || 1581 !channel->tx_count || !channel->rx_count) 1582 return -EINVAL; 1583 1584 mutex_lock(&mdev->state_lock); 1585 if (priv->port_up) { 1586 port_up = 1; 1587 mlx4_en_stop_port(dev, 1); 1588 } 1589 1590 mlx4_en_free_resources(priv); 1591 1592 priv->num_tx_rings_p_up = channel->tx_count; 1593 priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP; 1594 priv->rx_ring_num = channel->rx_count; 1595 
1596 err = mlx4_en_alloc_resources(priv); 1597 if (err) { 1598 en_err(priv, "Failed reallocating port resources\n"); 1599 goto out; 1600 } 1601 1602 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 1603 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 1604 1605 if (dev->num_tc) 1606 mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); 1607 1608 en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num); 1609 en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num); 1610 1611 if (port_up) { 1612 err = mlx4_en_start_port(dev); 1613 if (err) 1614 en_err(priv, "Failed starting port\n"); 1615 } 1616 1617 err = mlx4_en_moderation_update(priv); 1618 1619 out: 1620 mutex_unlock(&mdev->state_lock); 1621 return err; 1622 } 1623 1624 static int mlx4_en_get_ts_info(struct net_device *dev, 1625 struct ethtool_ts_info *info) 1626 { 1627 struct mlx4_en_priv *priv = netdev_priv(dev); 1628 struct mlx4_en_dev *mdev = priv->mdev; 1629 int ret; 1630 1631 ret = ethtool_op_get_ts_info(dev, info); 1632 if (ret) 1633 return ret; 1634 1635 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 1636 info->so_timestamping |= 1637 SOF_TIMESTAMPING_TX_HARDWARE | 1638 SOF_TIMESTAMPING_RX_HARDWARE | 1639 SOF_TIMESTAMPING_RAW_HARDWARE; 1640 1641 info->tx_types = 1642 (1 << HWTSTAMP_TX_OFF) | 1643 (1 << HWTSTAMP_TX_ON); 1644 1645 info->rx_filters = 1646 (1 << HWTSTAMP_FILTER_NONE) | 1647 (1 << HWTSTAMP_FILTER_ALL); 1648 1649 if (mdev->ptp_clock) 1650 info->phc_index = ptp_clock_index(mdev->ptp_clock); 1651 } 1652 1653 return ret; 1654 } 1655 1656 static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) 1657 { 1658 struct mlx4_en_priv *priv = netdev_priv(dev); 1659 bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); 1660 bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); 1661 int i; 1662 1663 if (bf_enabled_new == bf_enabled_old) 1664 return 0; /* Nothing to do */ 1665 1666 if (bf_enabled_new) { 1667 bool bf_supported = true; 1668 1669 for (i = 0; i < 
priv->tx_ring_num; i++) 1670 bf_supported &= priv->tx_ring[i]->bf_alloced; 1671 1672 if (!bf_supported) { 1673 en_err(priv, "BlueFlame is not supported\n"); 1674 return -EINVAL; 1675 } 1676 1677 priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; 1678 } else { 1679 priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; 1680 } 1681 1682 for (i = 0; i < priv->tx_ring_num; i++) 1683 priv->tx_ring[i]->bf_enabled = bf_enabled_new; 1684 1685 en_info(priv, "BlueFlame %s\n", 1686 bf_enabled_new ? "Enabled" : "Disabled"); 1687 1688 return 0; 1689 } 1690 1691 static u32 mlx4_en_get_priv_flags(struct net_device *dev) 1692 { 1693 struct mlx4_en_priv *priv = netdev_priv(dev); 1694 1695 return priv->pflags; 1696 } 1697 1698 static int mlx4_en_get_tunable(struct net_device *dev, 1699 const struct ethtool_tunable *tuna, 1700 void *data) 1701 { 1702 const struct mlx4_en_priv *priv = netdev_priv(dev); 1703 int ret = 0; 1704 1705 switch (tuna->id) { 1706 case ETHTOOL_TX_COPYBREAK: 1707 *(u32 *)data = priv->prof->inline_thold; 1708 break; 1709 default: 1710 ret = -EINVAL; 1711 break; 1712 } 1713 1714 return ret; 1715 } 1716 1717 static int mlx4_en_set_tunable(struct net_device *dev, 1718 const struct ethtool_tunable *tuna, 1719 const void *data) 1720 { 1721 struct mlx4_en_priv *priv = netdev_priv(dev); 1722 int val, ret = 0; 1723 1724 switch (tuna->id) { 1725 case ETHTOOL_TX_COPYBREAK: 1726 val = *(u32 *)data; 1727 if (val < MIN_PKT_LEN || val > MAX_INLINE) 1728 ret = -EINVAL; 1729 else 1730 priv->prof->inline_thold = val; 1731 break; 1732 default: 1733 ret = -EINVAL; 1734 break; 1735 } 1736 1737 return ret; 1738 } 1739 1740 static int mlx4_en_get_module_info(struct net_device *dev, 1741 struct ethtool_modinfo *modinfo) 1742 { 1743 struct mlx4_en_priv *priv = netdev_priv(dev); 1744 struct mlx4_en_dev *mdev = priv->mdev; 1745 int ret; 1746 u8 data[4]; 1747 1748 /* Read first 2 bytes to get Module & REV ID */ 1749 ret = mlx4_get_module_info(mdev->dev, priv->port, 1750 0/*offset*/, 2/*size*/, data); 
1751 if (ret < 2) 1752 return -EIO; 1753 1754 switch (data[0] /* identifier */) { 1755 case MLX4_MODULE_ID_QSFP: 1756 modinfo->type = ETH_MODULE_SFF_8436; 1757 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 1758 break; 1759 case MLX4_MODULE_ID_QSFP_PLUS: 1760 if (data[1] >= 0x3) { /* revision id */ 1761 modinfo->type = ETH_MODULE_SFF_8636; 1762 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 1763 } else { 1764 modinfo->type = ETH_MODULE_SFF_8436; 1765 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 1766 } 1767 break; 1768 case MLX4_MODULE_ID_QSFP28: 1769 modinfo->type = ETH_MODULE_SFF_8636; 1770 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 1771 break; 1772 case MLX4_MODULE_ID_SFP: 1773 modinfo->type = ETH_MODULE_SFF_8472; 1774 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 1775 break; 1776 default: 1777 return -ENOSYS; 1778 } 1779 1780 return 0; 1781 } 1782 1783 static int mlx4_en_get_module_eeprom(struct net_device *dev, 1784 struct ethtool_eeprom *ee, 1785 u8 *data) 1786 { 1787 struct mlx4_en_priv *priv = netdev_priv(dev); 1788 struct mlx4_en_dev *mdev = priv->mdev; 1789 int offset = ee->offset; 1790 int i = 0, ret; 1791 1792 if (ee->len == 0) 1793 return -EINVAL; 1794 1795 memset(data, 0, ee->len); 1796 1797 while (i < ee->len) { 1798 en_dbg(DRV, priv, 1799 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", 1800 i, offset, ee->len - i); 1801 1802 ret = mlx4_get_module_info(mdev->dev, priv->port, 1803 offset, ee->len - i, data + i); 1804 1805 if (!ret) /* Done reading */ 1806 return 0; 1807 1808 if (ret < 0) { 1809 en_err(priv, 1810 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", 1811 i, offset, ee->len - i, ret); 1812 return 0; 1813 } 1814 1815 i += ret; 1816 offset += ret; 1817 } 1818 return 0; 1819 } 1820 1821 const struct ethtool_ops mlx4_en_ethtool_ops = { 1822 .get_drvinfo = mlx4_en_get_drvinfo, 1823 .get_settings = mlx4_en_get_settings, 1824 .set_settings = mlx4_en_set_settings, 1825 .get_link = ethtool_op_get_link, 1826 
.get_strings = mlx4_en_get_strings, 1827 .get_sset_count = mlx4_en_get_sset_count, 1828 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 1829 .self_test = mlx4_en_self_test, 1830 .get_wol = mlx4_en_get_wol, 1831 .set_wol = mlx4_en_set_wol, 1832 .get_msglevel = mlx4_en_get_msglevel, 1833 .set_msglevel = mlx4_en_set_msglevel, 1834 .get_coalesce = mlx4_en_get_coalesce, 1835 .set_coalesce = mlx4_en_set_coalesce, 1836 .get_pauseparam = mlx4_en_get_pauseparam, 1837 .set_pauseparam = mlx4_en_set_pauseparam, 1838 .get_ringparam = mlx4_en_get_ringparam, 1839 .set_ringparam = mlx4_en_set_ringparam, 1840 .get_rxnfc = mlx4_en_get_rxnfc, 1841 .set_rxnfc = mlx4_en_set_rxnfc, 1842 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, 1843 .get_rxfh_key_size = mlx4_en_get_rxfh_key_size, 1844 .get_rxfh = mlx4_en_get_rxfh, 1845 .set_rxfh = mlx4_en_set_rxfh, 1846 .get_channels = mlx4_en_get_channels, 1847 .set_channels = mlx4_en_set_channels, 1848 .get_ts_info = mlx4_en_get_ts_info, 1849 .set_priv_flags = mlx4_en_set_priv_flags, 1850 .get_priv_flags = mlx4_en_get_priv_flags, 1851 .get_tunable = mlx4_en_get_tunable, 1852 .set_tunable = mlx4_en_set_tunable, 1853 .get_module_info = mlx4_en_get_module_info, 1854 .get_module_eeprom = mlx4_en_get_module_eeprom 1855 }; 1856 1857 1858 1859 1860 1861