1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2013 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/netdevice.h> 12 #include <linux/ethtool.h> 13 #include <linux/rtnetlink.h> 14 #include <linux/in.h> 15 #include "net_driver.h" 16 #include "workarounds.h" 17 #include "selftest.h" 18 #include "efx.h" 19 #include "filter.h" 20 #include "nic.h" 21 22 struct efx_sw_stat_desc { 23 const char *name; 24 enum { 25 EFX_ETHTOOL_STAT_SOURCE_nic, 26 EFX_ETHTOOL_STAT_SOURCE_channel, 27 EFX_ETHTOOL_STAT_SOURCE_tx_queue 28 } source; 29 unsigned offset; 30 u64(*get_stat) (void *field); /* Reader function */ 31 }; 32 33 /* Initialiser for a struct efx_sw_stat_desc with type-checking */ 34 #define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ 35 get_stat_function) { \ 36 .name = #stat_name, \ 37 .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ 38 .offset = ((((field_type *) 0) == \ 39 &((struct efx_##source_name *)0)->field) ? \ 40 offsetof(struct efx_##source_name, field) : \ 41 offsetof(struct efx_##source_name, field)), \ 42 .get_stat = get_stat_function, \ 43 } 44 45 static u64 efx_get_uint_stat(void *field) 46 { 47 return *(unsigned int *)field; 48 } 49 50 static u64 efx_get_atomic_stat(void *field) 51 { 52 return atomic_read((atomic_t *) field); 53 } 54 55 #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ 56 EFX_ETHTOOL_STAT(field, nic, field, \ 57 atomic_t, efx_get_atomic_stat) 58 59 #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ 60 EFX_ETHTOOL_STAT(field, channel, n_##field, \ 61 unsigned int, efx_get_uint_stat) 62 63 #define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ 64 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ 65 unsigned int, efx_get_uint_stat) 66 67 static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { 68 EFX_ETHTOOL_UINT_TXQ_STAT(merge_events), 69 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), 70 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), 71 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), 72 EFX_ETHTOOL_UINT_TXQ_STAT(pushes), 73 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), 74 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 75 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), 76 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), 77 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), 78 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 79 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), 80 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events), 81 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets), 82 }; 83 84 #define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) 85 86 #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB 87 88 /************************************************************************** 89 * 90 * Ethtool operations 91 * 92 ************************************************************************** 93 */ 94 95 /* Identify device by flashing LEDs */ 96 static int efx_ethtool_phys_id(struct net_device *net_dev, 97 enum ethtool_phys_id_state state) 98 { 99 struct efx_nic *efx = netdev_priv(net_dev); 100 enum efx_led_mode mode = EFX_LED_DEFAULT; 101 102 switch (state) { 103 case ETHTOOL_ID_ON: 104 mode = EFX_LED_ON; 105 break; 106 case ETHTOOL_ID_OFF: 107 mode = EFX_LED_OFF; 108 break; 109 case ETHTOOL_ID_INACTIVE: 110 mode = EFX_LED_DEFAULT; 111 break; 112 case ETHTOOL_ID_ACTIVE: 113 return 1; /* cycle on/off once per second */ 114 } 115 116 efx->type->set_id_led(efx, mode); 117 return 0; 118 } 119 120 /* This must be called with rtnl_lock held. */ 121 static int efx_ethtool_get_settings(struct net_device *net_dev, 122 struct ethtool_cmd *ecmd) 123 { 124 struct efx_nic *efx = netdev_priv(net_dev); 125 struct efx_link_state *link_state = &efx->link_state; 126 127 mutex_lock(&efx->mac_lock); 128 efx->phy_op->get_settings(efx, ecmd); 129 mutex_unlock(&efx->mac_lock); 130 131 /* Both MACs support pause frames (bidirectional and respond-only) */ 132 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 133 134 if (LOOPBACK_INTERNAL(efx)) { 135 ethtool_cmd_speed_set(ecmd, link_state->speed); 136 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; 137 } 138 139 return 0; 140 } 141 142 /* This must be called with rtnl_lock held. */ 143 static int efx_ethtool_set_settings(struct net_device *net_dev, 144 struct ethtool_cmd *ecmd) 145 { 146 struct efx_nic *efx = netdev_priv(net_dev); 147 int rc; 148 149 /* GMAC does not support 1000Mbps HD */ 150 if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && 151 (ecmd->duplex != DUPLEX_FULL)) { 152 netif_dbg(efx, drv, efx->net_dev, 153 "rejecting unsupported 1000Mbps HD setting\n"); 154 return -EINVAL; 155 } 156 157 mutex_lock(&efx->mac_lock); 158 rc = efx->phy_op->set_settings(efx, ecmd); 159 mutex_unlock(&efx->mac_lock); 160 return rc; 161 } 162 163 static void efx_ethtool_get_drvinfo(struct net_device *net_dev, 164 struct ethtool_drvinfo *info) 165 { 166 struct efx_nic *efx = netdev_priv(net_dev); 167 168 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 169 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 170 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 171 efx_mcdi_print_fwver(efx, info->fw_version, 172 sizeof(info->fw_version)); 173 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 174 } 175 176 static int efx_ethtool_get_regs_len(struct net_device *net_dev) 177 { 178 return efx_nic_get_regs_len(netdev_priv(net_dev)); 179 } 180 181 static void efx_ethtool_get_regs(struct net_device *net_dev, 182 struct ethtool_regs *regs, void *buf) 183 { 184 struct efx_nic *efx = netdev_priv(net_dev); 185 186 regs->version = efx->type->revision; 187 efx_nic_get_regs(efx, buf); 188 } 189 190 static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) 191 { 192 struct efx_nic *efx = netdev_priv(net_dev); 193 return efx->msg_enable; 194 } 195 196 static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) 197 { 198 struct efx_nic *efx = netdev_priv(net_dev); 199 efx->msg_enable = msg_enable; 200 } 201 202 /** 203 * efx_fill_test - fill in an individual self-test entry 204 * @test_index: Index of the test 205 * @strings: Ethtool strings, or %NULL 206 * @data: Ethtool test results, or %NULL 207 * @test: Pointer to test result (used only if data != %NULL) 208 * @unit_format: Unit name format (e.g. "chan\%d") 209 * @unit_id: Unit id (e.g. 0 for "chan0") 210 * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") 211 * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") 212 * 213 * Fill in an individual self-test entry. 214 */ 215 static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, 216 int *test, const char *unit_format, int unit_id, 217 const char *test_format, const char *test_id) 218 { 219 char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN]; 220 221 /* Fill data value, if applicable */ 222 if (data) 223 data[test_index] = *test; 224 225 /* Fill string, if applicable */ 226 if (strings) { 227 if (strchr(unit_format, '%')) 228 snprintf(unit_str, sizeof(unit_str), 229 unit_format, unit_id); 230 else 231 strcpy(unit_str, unit_format); 232 snprintf(test_str, sizeof(test_str), test_format, test_id); 233 snprintf(strings + test_index * ETH_GSTRING_LEN, 234 ETH_GSTRING_LEN, 235 "%-6s %-24s", unit_str, test_str); 236 } 237 } 238 239 #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel 240 #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue 241 #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue 242 #define EFX_LOOPBACK_NAME(_mode, _counter) \ 243 "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) 244 245 /** 246 * efx_fill_loopback_test - fill in a block of loopback self-test entries 247 * @efx: Efx NIC 248 * @lb_tests: Efx loopback self-test results structure 249 * @mode: Loopback test mode 250 * @test_index: Starting index of the test 251 * @strings: Ethtool strings, or %NULL 252 * @data: Ethtool test results, or %NULL 253 */ 254 static int efx_fill_loopback_test(struct efx_nic *efx, 255 struct efx_loopback_self_tests *lb_tests, 256 enum efx_loopback_mode mode, 257 unsigned int test_index, 258 u8 *strings, u64 *data) 259 { 260 struct efx_channel *channel = 261 efx_get_channel(efx, efx->tx_channel_offset); 262 struct efx_tx_queue *tx_queue; 263 264 efx_for_each_channel_tx_queue(tx_queue, channel) { 265 efx_fill_test(test_index++, strings, data, 266 &lb_tests->tx_sent[tx_queue->queue], 267 EFX_TX_QUEUE_NAME(tx_queue), 268 EFX_LOOPBACK_NAME(mode, "tx_sent")); 269 efx_fill_test(test_index++, strings, data, 270 &lb_tests->tx_done[tx_queue->queue], 271 EFX_TX_QUEUE_NAME(tx_queue), 272 EFX_LOOPBACK_NAME(mode, "tx_done")); 273 } 274 efx_fill_test(test_index++, strings, data, 275 &lb_tests->rx_good, 276 "rx", 0, 277 EFX_LOOPBACK_NAME(mode, "rx_good")); 278 efx_fill_test(test_index++, strings, data, 279 &lb_tests->rx_bad, 280 "rx", 0, 281 EFX_LOOPBACK_NAME(mode, "rx_bad")); 282 283 return test_index; 284 } 285 286 /** 287 * efx_ethtool_fill_self_tests - get self-test details 288 * @efx: Efx NIC 289 * @tests: Efx self-test results structure, or %NULL 290 * @strings: Ethtool strings, or %NULL 291 * @data: Ethtool test results, or %NULL 292 */ 293 static int efx_ethtool_fill_self_tests(struct efx_nic *efx, 294 struct efx_self_tests *tests, 295 u8 *strings, u64 *data) 296 { 297 struct efx_channel *channel; 298 unsigned int n = 0, i; 299 enum efx_loopback_mode mode; 300 301 efx_fill_test(n++, strings, data, &tests->phy_alive, 302 "phy", 0, "alive", NULL); 303 efx_fill_test(n++, strings, data, &tests->nvram, 304 "core", 0, "nvram", NULL); 305 efx_fill_test(n++, strings, data, &tests->interrupt, 306 "core", 0, "interrupt", NULL); 307 308 /* Event queues */ 309 efx_for_each_channel(channel, efx) { 310 efx_fill_test(n++, strings, data, 311 &tests->eventq_dma[channel->channel], 312 EFX_CHANNEL_NAME(channel), 313 "eventq.dma", NULL); 314 efx_fill_test(n++, strings, data, 315 &tests->eventq_int[channel->channel], 316 EFX_CHANNEL_NAME(channel), 317 "eventq.int", NULL); 318 } 319 320 efx_fill_test(n++, strings, data, &tests->registers, 321 "core", 0, "registers", NULL); 322 323 if (efx->phy_op->run_tests != NULL) { 324 EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); 325 326 for (i = 0; true; ++i) { 327 const char *name; 328 329 EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); 330 name = efx->phy_op->test_name(efx, i); 331 if (name == NULL) 332 break; 333 334 efx_fill_test(n++, strings, data, &tests->phy_ext[i], 335 "phy", 0, name, NULL); 336 } 337 } 338 339 /* Loopback tests */ 340 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { 341 if (!(efx->loopback_modes & (1 << mode))) 342 continue; 343 n = efx_fill_loopback_test(efx, 344 &tests->loopback[mode], mode, n, 345 strings, data); 346 } 347 348 return n; 349 } 350 351 static int efx_ethtool_get_sset_count(struct net_device *net_dev, 352 int string_set) 353 { 354 struct efx_nic *efx = netdev_priv(net_dev); 355 356 switch (string_set) { 357 case ETH_SS_STATS: 358 return efx->type->describe_stats(efx, NULL) + 359 EFX_ETHTOOL_SW_STAT_COUNT; 360 case ETH_SS_TEST: 361 return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); 362 default: 363 return -EINVAL; 364 } 365 } 366 367 static void efx_ethtool_get_strings(struct net_device *net_dev, 368 u32 string_set, u8 *strings) 369 { 370 struct efx_nic *efx = netdev_priv(net_dev); 371 int i; 372 373 switch (string_set) { 374 case ETH_SS_STATS: 375 strings += (efx->type->describe_stats(efx, strings) * 376 ETH_GSTRING_LEN); 377 for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) 378 strlcpy(strings + i * ETH_GSTRING_LEN, 379 efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); 380 break; 381 case ETH_SS_TEST: 382 efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); 383 break; 384 default: 385 /* No other string sets */ 386 break; 387 } 388 } 389 390 static void efx_ethtool_get_stats(struct net_device *net_dev, 391 struct ethtool_stats *stats, 392 u64 *data) 393 { 394 struct efx_nic *efx = netdev_priv(net_dev); 395 const struct efx_sw_stat_desc *stat; 396 struct efx_channel *channel; 397 struct efx_tx_queue *tx_queue; 398 int i; 399 400 spin_lock_bh(&efx->stats_lock); 401 402 /* Get NIC statistics */ 403 data += efx->type->update_stats(efx, data, NULL); 404 405 /* Get software statistics */ 406 for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) { 407 stat = &efx_sw_stat_desc[i]; 408 switch (stat->source) { 409 case EFX_ETHTOOL_STAT_SOURCE_nic: 410 data[i] = stat->get_stat((void *)efx + stat->offset); 411 break; 412 case EFX_ETHTOOL_STAT_SOURCE_channel: 413 data[i] = 0; 414 efx_for_each_channel(channel, efx) 415 data[i] += stat->get_stat((void *)channel + 416 stat->offset); 417 break; 418 case EFX_ETHTOOL_STAT_SOURCE_tx_queue: 419 data[i] = 0; 420 efx_for_each_channel(channel, efx) { 421 efx_for_each_channel_tx_queue(tx_queue, channel) 422 data[i] += 423 stat->get_stat((void *)tx_queue 424 + stat->offset); 425 } 426 break; 427 } 428 } 429 430 spin_unlock_bh(&efx->stats_lock); 431 } 432 433 static void efx_ethtool_self_test(struct net_device *net_dev, 434 struct ethtool_test *test, u64 *data) 435 { 436 struct efx_nic *efx = netdev_priv(net_dev); 437 struct efx_self_tests *efx_tests; 438 int already_up; 439 int rc = -ENOMEM; 440 441 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); 442 if (!efx_tests) 443 goto fail; 444 445 if (efx->state != STATE_READY) { 446 rc = -EIO; 447 goto fail1; 448 } 449 450 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", 451 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 452 453 /* We need rx buffers and interrupts. */ 454 already_up = (efx->net_dev->flags & IFF_UP); 455 if (!already_up) { 456 rc = dev_open(efx->net_dev); 457 if (rc) { 458 netif_err(efx, drv, efx->net_dev, 459 "failed opening device.\n"); 460 goto fail1; 461 } 462 } 463 464 rc = efx_selftest(efx, efx_tests, test->flags); 465 466 if (!already_up) 467 dev_close(efx->net_dev); 468 469 netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", 470 rc == 0 ? "passed" : "failed", 471 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 472 473 fail1: 474 /* Fill ethtool results structures */ 475 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); 476 kfree(efx_tests); 477 fail: 478 if (rc) 479 test->flags |= ETH_TEST_FL_FAILED; 480 } 481 482 /* Restart autonegotiation */ 483 static int efx_ethtool_nway_reset(struct net_device *net_dev) 484 { 485 struct efx_nic *efx = netdev_priv(net_dev); 486 487 return mdio45_nway_restart(&efx->mdio); 488 } 489 490 /* 491 * Each channel has a single IRQ and moderation timer, started by any 492 * completion (or other event). Unless the module parameter 493 * separate_tx_channels is set, IRQs and moderation are therefore 494 * shared between RX and TX completions. In this case, when RX IRQ 495 * moderation is explicitly changed then TX IRQ moderation is 496 * automatically changed too, but otherwise we fail if the two values 497 * are requested to be different. 498 * 499 * The hardware does not support a limit on the number of completions 500 * before an IRQ, so we do not use the max_frames fields. We should 501 * report and require that max_frames == (usecs != 0), but this would 502 * invalidate existing user documentation. 503 * 504 * The hardware does not have distinct settings for interrupt 505 * moderation while the previous IRQ is being handled, so we should 506 * not use the 'irq' fields. However, an earlier developer 507 * misunderstood the meaning of the 'irq' fields and the driver did 508 * not support the standard fields. To avoid invalidating existing 509 * user documentation, we report and accept changes through either the 510 * standard or 'irq' fields. If both are changed at the same time, we 511 * prefer the standard field. 512 * 513 * We implement adaptive IRQ moderation, but use a different algorithm 514 * from that assumed in the definition of struct ethtool_coalesce. 515 * Therefore we do not use any of the adaptive moderation parameters 516 * in it. 517 */ 518 519 static int efx_ethtool_get_coalesce(struct net_device *net_dev, 520 struct ethtool_coalesce *coalesce) 521 { 522 struct efx_nic *efx = netdev_priv(net_dev); 523 unsigned int tx_usecs, rx_usecs; 524 bool rx_adaptive; 525 526 efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); 527 528 coalesce->tx_coalesce_usecs = tx_usecs; 529 coalesce->tx_coalesce_usecs_irq = tx_usecs; 530 coalesce->rx_coalesce_usecs = rx_usecs; 531 coalesce->rx_coalesce_usecs_irq = rx_usecs; 532 coalesce->use_adaptive_rx_coalesce = rx_adaptive; 533 534 return 0; 535 } 536 537 static int efx_ethtool_set_coalesce(struct net_device *net_dev, 538 struct ethtool_coalesce *coalesce) 539 { 540 struct efx_nic *efx = netdev_priv(net_dev); 541 struct efx_channel *channel; 542 unsigned int tx_usecs, rx_usecs; 543 bool adaptive, rx_may_override_tx; 544 int rc; 545 546 if (coalesce->use_adaptive_tx_coalesce) 547 return -EINVAL; 548 549 efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); 550 551 if (coalesce->rx_coalesce_usecs != rx_usecs) 552 rx_usecs = coalesce->rx_coalesce_usecs; 553 else 554 rx_usecs = coalesce->rx_coalesce_usecs_irq; 555 556 adaptive = coalesce->use_adaptive_rx_coalesce; 557 558 /* If channels are shared, TX IRQ moderation can be quietly 559 * overridden unless it is changed from its old value. 560 */ 561 rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && 562 coalesce->tx_coalesce_usecs_irq == tx_usecs); 563 if (coalesce->tx_coalesce_usecs != tx_usecs) 564 tx_usecs = coalesce->tx_coalesce_usecs; 565 else 566 tx_usecs = coalesce->tx_coalesce_usecs_irq; 567 568 rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, 569 rx_may_override_tx); 570 if (rc != 0) 571 return rc; 572 573 efx_for_each_channel(channel, efx) 574 efx->type->push_irq_moderation(channel); 575 576 return 0; 577 } 578 579 static void efx_ethtool_get_ringparam(struct net_device *net_dev, 580 struct ethtool_ringparam *ring) 581 { 582 struct efx_nic *efx = netdev_priv(net_dev); 583 584 ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; 585 ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; 586 ring->rx_pending = efx->rxq_entries; 587 ring->tx_pending = efx->txq_entries; 588 } 589 590 static int efx_ethtool_set_ringparam(struct net_device *net_dev, 591 struct ethtool_ringparam *ring) 592 { 593 struct efx_nic *efx = netdev_priv(net_dev); 594 u32 txq_entries; 595 596 if (ring->rx_mini_pending || ring->rx_jumbo_pending || 597 ring->rx_pending > EFX_MAX_DMAQ_SIZE || 598 ring->tx_pending > EFX_MAX_DMAQ_SIZE) 599 return -EINVAL; 600 601 if (ring->rx_pending < EFX_RXQ_MIN_ENT) { 602 netif_err(efx, drv, efx->net_dev, 603 "RX queues cannot be smaller than %u\n", 604 EFX_RXQ_MIN_ENT); 605 return -EINVAL; 606 } 607 608 txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); 609 if (txq_entries != ring->tx_pending) 610 netif_warn(efx, drv, efx->net_dev, 611 "increasing TX queue size to minimum of %u\n", 612 txq_entries); 613 614 return efx_realloc_channels(efx, ring->rx_pending, txq_entries); 615 } 616 617 static int efx_ethtool_set_pauseparam(struct net_device *net_dev, 618 struct ethtool_pauseparam *pause) 619 { 620 struct efx_nic *efx = netdev_priv(net_dev); 621 u8 wanted_fc, old_fc; 622 u32 old_adv; 623 int rc = 0; 624 625 mutex_lock(&efx->mac_lock); 626 627 wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | 628 (pause->tx_pause ? EFX_FC_TX : 0) | 629 (pause->autoneg ? EFX_FC_AUTO : 0)); 630 631 if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { 632 netif_dbg(efx, drv, efx->net_dev, 633 "Flow control unsupported: tx ON rx OFF\n"); 634 rc = -EINVAL; 635 goto out; 636 } 637 638 if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { 639 netif_dbg(efx, drv, efx->net_dev, 640 "Autonegotiation is disabled\n"); 641 rc = -EINVAL; 642 goto out; 643 } 644 645 /* Hook for Falcon bug 11482 workaround */ 646 if (efx->type->prepare_enable_fc_tx && 647 (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) 648 efx->type->prepare_enable_fc_tx(efx); 649 650 old_adv = efx->link_advertising; 651 old_fc = efx->wanted_fc; 652 efx_link_set_wanted_fc(efx, wanted_fc); 653 if (efx->link_advertising != old_adv || 654 (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { 655 rc = efx->phy_op->reconfigure(efx); 656 if (rc) { 657 netif_err(efx, drv, efx->net_dev, 658 "Unable to advertise requested flow " 659 "control setting\n"); 660 goto out; 661 } 662 } 663 664 /* Reconfigure the MAC. The PHY *may* generate a link state change event 665 * if the user just changed the advertised capabilities, but there's no 666 * harm doing this twice */ 667 efx->type->reconfigure_mac(efx); 668 669 out: 670 mutex_unlock(&efx->mac_lock); 671 672 return rc; 673 } 674 675 static void efx_ethtool_get_pauseparam(struct net_device *net_dev, 676 struct ethtool_pauseparam *pause) 677 { 678 struct efx_nic *efx = netdev_priv(net_dev); 679 680 pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); 681 pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); 682 pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); 683 } 684 685 686 static void efx_ethtool_get_wol(struct net_device *net_dev, 687 struct ethtool_wolinfo *wol) 688 { 689 struct efx_nic *efx = netdev_priv(net_dev); 690 return efx->type->get_wol(efx, wol); 691 } 692 693 694 static int efx_ethtool_set_wol(struct net_device *net_dev, 695 struct ethtool_wolinfo *wol) 696 { 697 struct efx_nic *efx = netdev_priv(net_dev); 698 return efx->type->set_wol(efx, wol->wolopts); 699 } 700 701 static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) 702 { 703 struct efx_nic *efx = netdev_priv(net_dev); 704 int rc; 705 706 rc = efx->type->map_reset_flags(flags); 707 if (rc < 0) 708 return rc; 709 710 return efx_reset(efx, rc); 711 } 712 713 /* MAC address mask including only I/G bit */ 714 static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 715 716 #define IP4_ADDR_FULL_MASK ((__force __be32)~0) 717 #define PORT_FULL_MASK ((__force __be16)~0) 718 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) 719 720 static int efx_ethtool_get_class_rule(struct efx_nic *efx, 721 struct ethtool_rx_flow_spec *rule) 722 { 723 struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; 724 struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; 725 struct ethhdr *mac_entry = &rule->h_u.ether_spec; 726 struct ethhdr *mac_mask = &rule->m_u.ether_spec; 727 struct efx_filter_spec spec; 728 int rc; 729 730 rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, 731 rule->location, &spec); 732 if (rc) 733 return rc; 734 735 if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) 736 rule->ring_cookie = RX_CLS_FLOW_DISC; 737 else 738 rule->ring_cookie = spec.dmaq_id; 739 740 if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && 741 spec.ether_type == htons(ETH_P_IP) && 742 (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && 743 (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && 744 !(spec.match_flags & 745 ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | 746 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | 747 EFX_FILTER_MATCH_IP_PROTO | 748 EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { 749 rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 750 TCP_V4_FLOW : UDP_V4_FLOW); 751 if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { 752 ip_entry->ip4dst = spec.loc_host[0]; 753 ip_mask->ip4dst = IP4_ADDR_FULL_MASK; 754 } 755 if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { 756 ip_entry->ip4src = spec.rem_host[0]; 757 ip_mask->ip4src = IP4_ADDR_FULL_MASK; 758 } 759 if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { 760 ip_entry->pdst = spec.loc_port; 761 ip_mask->pdst = PORT_FULL_MASK; 762 } 763 if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { 764 ip_entry->psrc = spec.rem_port; 765 ip_mask->psrc = PORT_FULL_MASK; 766 } 767 } else if (!(spec.match_flags & 768 ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | 769 EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | 770 EFX_FILTER_MATCH_OUTER_VID))) { 771 rule->flow_type = ETHER_FLOW; 772 if (spec.match_flags & 773 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { 774 memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN); 775 if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) 776 memset(mac_mask->h_dest, ~0, ETH_ALEN); 777 else 778 memcpy(mac_mask->h_dest, mac_addr_ig_mask, 779 ETH_ALEN); 780 } 781 if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { 782 memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN); 783 memset(mac_mask->h_source, ~0, ETH_ALEN); 784 } 785 if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 786 mac_entry->h_proto = spec.ether_type; 787 mac_mask->h_proto = ETHER_TYPE_FULL_MASK; 788 } 789 } else { 790 /* The above should handle all filters that we insert */ 791 WARN_ON(1); 792 return -EINVAL; 793 } 794 795 if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { 796 rule->flow_type |= FLOW_EXT; 797 rule->h_ext.vlan_tci = spec.outer_vid; 798 rule->m_ext.vlan_tci = htons(0xfff); 799 } 800 801 return rc; 802 } 803 804 static int 805 efx_ethtool_get_rxnfc(struct net_device *net_dev, 806 struct ethtool_rxnfc *info, u32 *rule_locs) 807 { 808 struct efx_nic *efx = netdev_priv(net_dev); 809 810 switch (info->cmd) { 811 case ETHTOOL_GRXRINGS: 812 info->data = efx->n_rx_channels; 813 return 0; 814 815 case ETHTOOL_GRXFH: { 816 unsigned min_revision = 0; 817 818 info->data = 0; 819 switch (info->flow_type) { 820 case TCP_V4_FLOW: 821 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 822 /* fall through */ 823 case UDP_V4_FLOW: 824 case SCTP_V4_FLOW: 825 case AH_ESP_V4_FLOW: 826 case IPV4_FLOW: 827 info->data |= RXH_IP_SRC | RXH_IP_DST; 828 min_revision = EFX_REV_FALCON_B0; 829 break; 830 case TCP_V6_FLOW: 831 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 832 /* fall through */ 833 case UDP_V6_FLOW: 834 case SCTP_V6_FLOW: 835 case AH_ESP_V6_FLOW: 836 case IPV6_FLOW: 837 info->data |= RXH_IP_SRC | RXH_IP_DST; 838 min_revision = EFX_REV_SIENA_A0; 839 break; 840 default: 841 break; 842 } 843 if (efx_nic_rev(efx) < min_revision) 844 info->data = 0; 845 return 0; 846 } 847 848 case ETHTOOL_GRXCLSRLCNT: 849 info->data = efx_filter_get_rx_id_limit(efx); 850 if (info->data == 0) 851 return -EOPNOTSUPP; 852 info->data |= RX_CLS_LOC_SPECIAL; 853 info->rule_cnt = 854 efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); 855 return 0; 856 857 case ETHTOOL_GRXCLSRULE: 858 if (efx_filter_get_rx_id_limit(efx) == 0) 859 return -EOPNOTSUPP; 860 return efx_ethtool_get_class_rule(efx, &info->fs); 861 862 case ETHTOOL_GRXCLSRLALL: { 863 s32 rc; 864 info->data = efx_filter_get_rx_id_limit(efx); 865 if (info->data == 0) 866 return -EOPNOTSUPP; 867 rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, 868 rule_locs, info->rule_cnt); 869 if (rc < 0) 870 return rc; 871 info->rule_cnt = rc; 872 return 0; 873 } 874 875 default: 876 return -EOPNOTSUPP; 877 } 878 } 879 880 static int efx_ethtool_set_class_rule(struct efx_nic *efx, 881 struct ethtool_rx_flow_spec *rule) 882 { 883 struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; 884 struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; 885 struct ethhdr *mac_entry = &rule->h_u.ether_spec; 886 struct ethhdr *mac_mask = &rule->m_u.ether_spec; 887 struct efx_filter_spec spec; 888 int rc; 889 890 /* Check that user wants us to choose the location */ 891 if (rule->location != RX_CLS_LOC_ANY) 892 return -EINVAL; 893 894 /* Range-check ring_cookie */ 895 if (rule->ring_cookie >= efx->n_rx_channels && 896 rule->ring_cookie != RX_CLS_FLOW_DISC) 897 return -EINVAL; 898 899 /* Check for unsupported extensions */ 900 if ((rule->flow_type & FLOW_EXT) && 901 (rule->m_ext.vlan_etype || rule->m_ext.data[0] || 902 rule->m_ext.data[1])) 903 return -EINVAL; 904 905 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 906 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 907 (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 908 EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); 909 910 switch (rule->flow_type & ~FLOW_EXT) { 911 case TCP_V4_FLOW: 912 case UDP_V4_FLOW: 913 spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | 914 EFX_FILTER_MATCH_IP_PROTO); 915 spec.ether_type = htons(ETH_P_IP); 916 spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ? 917 IPPROTO_TCP : IPPROTO_UDP); 918 if (ip_mask->ip4dst) { 919 if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) 920 return -EINVAL; 921 spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; 922 spec.loc_host[0] = ip_entry->ip4dst; 923 } 924 if (ip_mask->ip4src) { 925 if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) 926 return -EINVAL; 927 spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; 928 spec.rem_host[0] = ip_entry->ip4src; 929 } 930 if (ip_mask->pdst) { 931 if (ip_mask->pdst != PORT_FULL_MASK) 932 return -EINVAL; 933 spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; 934 spec.loc_port = ip_entry->pdst; 935 } 936 if (ip_mask->psrc) { 937 if (ip_mask->psrc != PORT_FULL_MASK) 938 return -EINVAL; 939 spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; 940 spec.rem_port = ip_entry->psrc; 941 } 942 if (ip_mask->tos) 943 return -EINVAL; 944 break; 945 946 case ETHER_FLOW: 947 if (!is_zero_ether_addr(mac_mask->h_dest)) { 948 if (ether_addr_equal(mac_mask->h_dest, 949 mac_addr_ig_mask)) 950 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; 951 else if (is_broadcast_ether_addr(mac_mask->h_dest)) 952 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; 953 else 954 return -EINVAL; 955 memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN); 956 } 957 if (!is_zero_ether_addr(mac_mask->h_source)) { 958 if (!is_broadcast_ether_addr(mac_mask->h_source)) 959 return -EINVAL; 960 spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; 961 memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN); 962 } 963 if (mac_mask->h_proto) { 964 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) 965 return -EINVAL; 966 spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; 967 spec.ether_type = mac_entry->h_proto; 968 } 969 break; 970 971 default: 972 return -EINVAL; 973 } 974 975 if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { 976 if (rule->m_ext.vlan_tci != htons(0xfff)) 977 return -EINVAL; 978 spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; 979 spec.outer_vid = rule->h_ext.vlan_tci; 980 } 981 982 rc = efx_filter_insert_filter(efx, &spec, true); 983 if (rc < 0) 984 return rc; 985 986 rule->location = rc; 987 return 0; 988 } 989 990 static int efx_ethtool_set_rxnfc(struct net_device *net_dev, 991 struct ethtool_rxnfc *info) 992 { 993 struct efx_nic *efx = netdev_priv(net_dev); 994 995 if (efx_filter_get_rx_id_limit(efx) == 0) 996 return -EOPNOTSUPP; 997 998 switch (info->cmd) { 999 case ETHTOOL_SRXCLSRLINS: 1000 return efx_ethtool_set_class_rule(efx, &info->fs); 1001 1002 case ETHTOOL_SRXCLSRLDEL: 1003 return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, 1004 info->fs.location); 1005 1006 default: 1007 return -EOPNOTSUPP; 1008 } 1009 } 1010 1011 static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) 1012 { 1013 struct efx_nic *efx = netdev_priv(net_dev); 1014 1015 return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 || 1016 efx->n_rx_channels == 1) ? 1017 0 : ARRAY_SIZE(efx->rx_indir_table)); 1018 } 1019 1020 static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) 1021 { 1022 struct efx_nic *efx = netdev_priv(net_dev); 1023 1024 memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); 1025 return 0; 1026 } 1027 1028 static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, 1029 const u32 *indir) 1030 { 1031 struct efx_nic *efx = netdev_priv(net_dev); 1032 1033 memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); 1034 efx_nic_push_rx_indir_table(efx); 1035 return 0; 1036 } 1037 1038 int efx_ethtool_get_ts_info(struct net_device *net_dev, 1039 struct ethtool_ts_info *ts_info) 1040 { 1041 struct efx_nic *efx = netdev_priv(net_dev); 1042 1043 /* Software capabilities */ 1044 ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | 1045 SOF_TIMESTAMPING_SOFTWARE); 1046 ts_info->phc_index = -1; 1047 1048 efx_ptp_get_ts_info(efx, ts_info); 1049 return 0; 1050 } 1051 1052 static int efx_ethtool_get_module_eeprom(struct net_device *net_dev, 1053 struct ethtool_eeprom *ee, 1054 u8 *data) 1055 { 1056 struct efx_nic *efx = netdev_priv(net_dev); 1057 int ret; 1058 1059 if (!efx->phy_op || !efx->phy_op->get_module_eeprom) 1060 return -EOPNOTSUPP; 1061 1062 mutex_lock(&efx->mac_lock); 1063 ret = efx->phy_op->get_module_eeprom(efx, ee, data); 1064 mutex_unlock(&efx->mac_lock); 1065 1066 return ret; 1067 } 1068 1069 static int efx_ethtool_get_module_info(struct net_device *net_dev, 1070 struct ethtool_modinfo *modinfo) 1071 { 1072 struct efx_nic *efx = netdev_priv(net_dev); 1073 int ret; 1074 1075 if (!efx->phy_op || !efx->phy_op->get_module_info) 1076 return -EOPNOTSUPP; 1077 1078 mutex_lock(&efx->mac_lock); 1079 ret = efx->phy_op->get_module_info(efx, modinfo); 1080 mutex_unlock(&efx->mac_lock); 1081 1082 return ret; 1083 } 1084 1085 const struct ethtool_ops efx_ethtool_ops = { 1086 .get_settings = efx_ethtool_get_settings, 1087 .set_settings = efx_ethtool_set_settings, 1088 .get_drvinfo = efx_ethtool_get_drvinfo, 1089 .get_regs_len = efx_ethtool_get_regs_len, 1090 .get_regs = efx_ethtool_get_regs, 1091 .get_msglevel = efx_ethtool_get_msglevel, 1092 .set_msglevel = efx_ethtool_set_msglevel, 1093 .nway_reset = efx_ethtool_nway_reset, 1094 .get_link = ethtool_op_get_link, 1095 .get_coalesce = efx_ethtool_get_coalesce, 1096 .set_coalesce = efx_ethtool_set_coalesce, 1097 .get_ringparam = efx_ethtool_get_ringparam, 1098 .set_ringparam = efx_ethtool_set_ringparam, 1099 .get_pauseparam = efx_ethtool_get_pauseparam, 1100 .set_pauseparam = efx_ethtool_set_pauseparam, 1101 .get_sset_count = efx_ethtool_get_sset_count, 1102 .self_test = efx_ethtool_self_test, 1103 .get_strings = efx_ethtool_get_strings, 1104 .set_phys_id = efx_ethtool_phys_id, 1105 .get_ethtool_stats = efx_ethtool_get_stats, 1106 .get_wol = efx_ethtool_get_wol, 1107 .set_wol = efx_ethtool_set_wol, 1108 .reset = efx_ethtool_reset, 1109 .get_rxnfc = efx_ethtool_get_rxnfc, 1110 .set_rxnfc = efx_ethtool_set_rxnfc, 1111 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1112 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1113 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1114 .get_ts_info = efx_ethtool_get_ts_info, 1115 .get_module_info = efx_ethtool_get_module_info, 1116 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1117 }; 1118