/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 */ 32 33 #include <linux/pci.h> 34 35 #include "ena_netdev.h" 36 37 struct ena_stats { 38 char name[ETH_GSTRING_LEN]; 39 int stat_offset; 40 }; 41 42 #define ENA_STAT_ENA_COM_ENTRY(stat) { \ 43 .name = #stat, \ 44 .stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \ 45 } 46 47 #define ENA_STAT_ENTRY(stat, stat_type) { \ 48 .name = #stat, \ 49 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \ 50 } 51 52 #define ENA_STAT_HW_ENTRY(stat, stat_type) { \ 53 .name = #stat, \ 54 .stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \ 55 } 56 57 #define ENA_STAT_RX_ENTRY(stat) \ 58 ENA_STAT_ENTRY(stat, rx) 59 60 #define ENA_STAT_TX_ENTRY(stat) \ 61 ENA_STAT_ENTRY(stat, tx) 62 63 #define ENA_STAT_GLOBAL_ENTRY(stat) \ 64 ENA_STAT_ENTRY(stat, dev) 65 66 #define ENA_STAT_ENI_ENTRY(stat) \ 67 ENA_STAT_HW_ENTRY(stat, eni_stats) 68 69 static const struct ena_stats ena_stats_global_strings[] = { 70 ENA_STAT_GLOBAL_ENTRY(tx_timeout), 71 ENA_STAT_GLOBAL_ENTRY(suspend), 72 ENA_STAT_GLOBAL_ENTRY(resume), 73 ENA_STAT_GLOBAL_ENTRY(wd_expired), 74 ENA_STAT_GLOBAL_ENTRY(interface_up), 75 ENA_STAT_GLOBAL_ENTRY(interface_down), 76 ENA_STAT_GLOBAL_ENTRY(admin_q_pause), 77 }; 78 79 static const struct ena_stats ena_stats_eni_strings[] = { 80 ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded), 81 ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded), 82 ENA_STAT_ENI_ENTRY(pps_allowance_exceeded), 83 ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded), 84 ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded), 85 }; 86 87 static const struct ena_stats ena_stats_tx_strings[] = { 88 ENA_STAT_TX_ENTRY(cnt), 89 ENA_STAT_TX_ENTRY(bytes), 90 ENA_STAT_TX_ENTRY(queue_stop), 91 ENA_STAT_TX_ENTRY(queue_wakeup), 92 ENA_STAT_TX_ENTRY(dma_mapping_err), 93 ENA_STAT_TX_ENTRY(linearize), 94 ENA_STAT_TX_ENTRY(linearize_failed), 95 ENA_STAT_TX_ENTRY(napi_comp), 96 ENA_STAT_TX_ENTRY(tx_poll), 97 ENA_STAT_TX_ENTRY(doorbells), 98 ENA_STAT_TX_ENTRY(prepare_ctx_err), 99 
ENA_STAT_TX_ENTRY(bad_req_id), 100 ENA_STAT_TX_ENTRY(llq_buffer_copy), 101 ENA_STAT_TX_ENTRY(missed_tx), 102 ENA_STAT_TX_ENTRY(unmask_interrupt), 103 }; 104 105 static const struct ena_stats ena_stats_rx_strings[] = { 106 ENA_STAT_RX_ENTRY(cnt), 107 ENA_STAT_RX_ENTRY(bytes), 108 ENA_STAT_RX_ENTRY(rx_copybreak_pkt), 109 ENA_STAT_RX_ENTRY(csum_good), 110 ENA_STAT_RX_ENTRY(refil_partial), 111 ENA_STAT_RX_ENTRY(bad_csum), 112 ENA_STAT_RX_ENTRY(page_alloc_fail), 113 ENA_STAT_RX_ENTRY(skb_alloc_fail), 114 ENA_STAT_RX_ENTRY(dma_mapping_err), 115 ENA_STAT_RX_ENTRY(bad_desc_num), 116 ENA_STAT_RX_ENTRY(bad_req_id), 117 ENA_STAT_RX_ENTRY(empty_rx_ring), 118 ENA_STAT_RX_ENTRY(csum_unchecked), 119 ENA_STAT_RX_ENTRY(xdp_aborted), 120 ENA_STAT_RX_ENTRY(xdp_drop), 121 ENA_STAT_RX_ENTRY(xdp_pass), 122 ENA_STAT_RX_ENTRY(xdp_tx), 123 ENA_STAT_RX_ENTRY(xdp_invalid), 124 }; 125 126 static const struct ena_stats ena_stats_ena_com_strings[] = { 127 ENA_STAT_ENA_COM_ENTRY(aborted_cmd), 128 ENA_STAT_ENA_COM_ENTRY(submitted_cmd), 129 ENA_STAT_ENA_COM_ENTRY(completed_cmd), 130 ENA_STAT_ENA_COM_ENTRY(out_of_space), 131 ENA_STAT_ENA_COM_ENTRY(no_completion), 132 }; 133 134 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 135 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 136 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 137 #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) 138 #define ENA_STATS_ARRAY_ENI(adapter) \ 139 (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported) 140 141 static void ena_safe_update_stat(u64 *src, u64 *dst, 142 struct u64_stats_sync *syncp) 143 { 144 unsigned int start; 145 146 do { 147 start = u64_stats_fetch_begin_irq(syncp); 148 *(dst) = *src; 149 } while (u64_stats_fetch_retry_irq(syncp, start)); 150 } 151 152 static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) 153 { 154 const struct ena_stats *ena_stats; 155 struct ena_ring *ring; 156 157 u64 *ptr; 158 int i, j; 159 
160 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { 161 /* Tx stats */ 162 ring = &adapter->tx_ring[i]; 163 164 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { 165 ena_stats = &ena_stats_tx_strings[j]; 166 167 ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset; 168 169 ena_safe_update_stat(ptr, (*data)++, &ring->syncp); 170 } 171 /* XDP TX queues don't have a RX queue counterpart */ 172 if (!ENA_IS_XDP_INDEX(adapter, i)) { 173 /* Rx stats */ 174 ring = &adapter->rx_ring[i]; 175 176 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { 177 ena_stats = &ena_stats_rx_strings[j]; 178 179 ptr = (u64 *)&ring->rx_stats + 180 ena_stats->stat_offset; 181 182 ena_safe_update_stat(ptr, (*data)++, &ring->syncp); 183 } 184 } 185 } 186 } 187 188 static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data) 189 { 190 const struct ena_stats *ena_stats; 191 u64 *ptr; 192 int i; 193 194 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { 195 ena_stats = &ena_stats_ena_com_strings[i]; 196 197 ptr = (u64 *)&adapter->ena_dev->admin_queue.stats + 198 ena_stats->stat_offset; 199 200 *(*data)++ = *ptr; 201 } 202 } 203 204 static void ena_get_stats(struct ena_adapter *adapter, 205 u64 *data, 206 bool eni_stats_needed) 207 { 208 const struct ena_stats *ena_stats; 209 u64 *ptr; 210 int i; 211 212 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { 213 ena_stats = &ena_stats_global_strings[i]; 214 215 ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset; 216 217 ena_safe_update_stat(ptr, data++, &adapter->syncp); 218 } 219 220 if (eni_stats_needed) { 221 ena_update_hw_stats(adapter); 222 for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { 223 ena_stats = &ena_stats_eni_strings[i]; 224 225 ptr = (u64 *)&adapter->eni_stats + 226 ena_stats->stat_offset; 227 228 ena_safe_update_stat(ptr, data++, &adapter->syncp); 229 } 230 } 231 232 ena_queue_stats(adapter, &data); 233 ena_dev_admin_queue_stats(adapter, &data); 234 } 235 236 static void ena_get_ethtool_stats(struct 
net_device *netdev, 237 struct ethtool_stats *stats, 238 u64 *data) 239 { 240 struct ena_adapter *adapter = netdev_priv(netdev); 241 242 ena_get_stats(adapter, data, adapter->eni_stats_supported); 243 } 244 245 static int ena_get_sw_stats_count(struct ena_adapter *adapter) 246 { 247 return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) 248 + adapter->xdp_num_queues * ENA_STATS_ARRAY_TX 249 + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; 250 } 251 252 static int ena_get_hw_stats_count(struct ena_adapter *adapter) 253 { 254 return ENA_STATS_ARRAY_ENI(adapter); 255 } 256 257 int ena_get_sset_count(struct net_device *netdev, int sset) 258 { 259 struct ena_adapter *adapter = netdev_priv(netdev); 260 261 if (sset != ETH_SS_STATS) 262 return -EOPNOTSUPP; 263 264 return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter); 265 } 266 267 static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) 268 { 269 const struct ena_stats *ena_stats; 270 bool is_xdp; 271 int i, j; 272 273 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { 274 is_xdp = ENA_IS_XDP_INDEX(adapter, i); 275 /* Tx stats */ 276 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { 277 ena_stats = &ena_stats_tx_strings[j]; 278 279 snprintf(*data, ETH_GSTRING_LEN, 280 "queue_%u_%s_%s", i, 281 is_xdp ? 
"xdp_tx" : "tx", ena_stats->name); 282 (*data) += ETH_GSTRING_LEN; 283 } 284 285 if (!is_xdp) { 286 /* RX stats, in XDP there isn't a RX queue 287 * counterpart 288 */ 289 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { 290 ena_stats = &ena_stats_rx_strings[j]; 291 292 snprintf(*data, ETH_GSTRING_LEN, 293 "queue_%u_rx_%s", i, ena_stats->name); 294 (*data) += ETH_GSTRING_LEN; 295 } 296 } 297 } 298 } 299 300 static void ena_com_dev_strings(u8 **data) 301 { 302 const struct ena_stats *ena_stats; 303 int i; 304 305 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { 306 ena_stats = &ena_stats_ena_com_strings[i]; 307 308 snprintf(*data, ETH_GSTRING_LEN, 309 "ena_admin_q_%s", ena_stats->name); 310 (*data) += ETH_GSTRING_LEN; 311 } 312 } 313 314 static void ena_get_strings(struct ena_adapter *adapter, 315 u8 *data, 316 bool eni_stats_needed) 317 { 318 const struct ena_stats *ena_stats; 319 int i; 320 321 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { 322 ena_stats = &ena_stats_global_strings[i]; 323 memcpy(data, ena_stats->name, ETH_GSTRING_LEN); 324 data += ETH_GSTRING_LEN; 325 } 326 327 if (eni_stats_needed) { 328 for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { 329 ena_stats = &ena_stats_eni_strings[i]; 330 memcpy(data, ena_stats->name, ETH_GSTRING_LEN); 331 data += ETH_GSTRING_LEN; 332 } 333 } 334 335 ena_queue_strings(adapter, &data); 336 ena_com_dev_strings(&data); 337 } 338 339 static void ena_get_ethtool_strings(struct net_device *netdev, 340 u32 sset, 341 u8 *data) 342 { 343 struct ena_adapter *adapter = netdev_priv(netdev); 344 345 if (sset != ETH_SS_STATS) 346 return; 347 348 ena_get_strings(adapter, data, adapter->eni_stats_supported); 349 } 350 351 static int ena_get_link_ksettings(struct net_device *netdev, 352 struct ethtool_link_ksettings *link_ksettings) 353 { 354 struct ena_adapter *adapter = netdev_priv(netdev); 355 struct ena_com_dev *ena_dev = adapter->ena_dev; 356 struct ena_admin_get_feature_link_desc *link; 357 struct ena_admin_get_feat_resp feat_resp; 
358 int rc; 359 360 rc = ena_com_get_link_params(ena_dev, &feat_resp); 361 if (rc) 362 return rc; 363 364 link = &feat_resp.u.link; 365 link_ksettings->base.speed = link->speed; 366 367 if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) { 368 ethtool_link_ksettings_add_link_mode(link_ksettings, 369 supported, Autoneg); 370 ethtool_link_ksettings_add_link_mode(link_ksettings, 371 supported, Autoneg); 372 } 373 374 link_ksettings->base.autoneg = 375 (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ? 376 AUTONEG_ENABLE : AUTONEG_DISABLE; 377 378 link_ksettings->base.duplex = DUPLEX_FULL; 379 380 return 0; 381 } 382 383 static int ena_get_coalesce(struct net_device *net_dev, 384 struct ethtool_coalesce *coalesce) 385 { 386 struct ena_adapter *adapter = netdev_priv(net_dev); 387 struct ena_com_dev *ena_dev = adapter->ena_dev; 388 389 if (!ena_com_interrupt_moderation_supported(ena_dev)) 390 return -EOPNOTSUPP; 391 392 coalesce->tx_coalesce_usecs = 393 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * 394 ena_dev->intr_delay_resolution; 395 396 coalesce->rx_coalesce_usecs = 397 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) 398 * ena_dev->intr_delay_resolution; 399 400 coalesce->use_adaptive_rx_coalesce = 401 ena_com_get_adaptive_moderation_enabled(ena_dev); 402 403 return 0; 404 } 405 406 static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) 407 { 408 unsigned int val; 409 int i; 410 411 val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev); 412 413 for (i = 0; i < adapter->num_io_queues; i++) 414 adapter->tx_ring[i].smoothed_interval = val; 415 } 416 417 static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) 418 { 419 unsigned int val; 420 int i; 421 422 val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev); 423 424 for (i = 0; i < adapter->num_io_queues; i++) 425 adapter->rx_ring[i].smoothed_interval = val; 426 } 427 428 
static int ena_set_coalesce(struct net_device *net_dev, 429 struct ethtool_coalesce *coalesce) 430 { 431 struct ena_adapter *adapter = netdev_priv(net_dev); 432 struct ena_com_dev *ena_dev = adapter->ena_dev; 433 int rc; 434 435 if (!ena_com_interrupt_moderation_supported(ena_dev)) 436 return -EOPNOTSUPP; 437 438 rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 439 coalesce->tx_coalesce_usecs); 440 if (rc) 441 return rc; 442 443 ena_update_tx_rings_nonadaptive_intr_moderation(adapter); 444 445 rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 446 coalesce->rx_coalesce_usecs); 447 if (rc) 448 return rc; 449 450 ena_update_rx_rings_nonadaptive_intr_moderation(adapter); 451 452 if (coalesce->use_adaptive_rx_coalesce && 453 !ena_com_get_adaptive_moderation_enabled(ena_dev)) 454 ena_com_enable_adaptive_moderation(ena_dev); 455 456 if (!coalesce->use_adaptive_rx_coalesce && 457 ena_com_get_adaptive_moderation_enabled(ena_dev)) 458 ena_com_disable_adaptive_moderation(ena_dev); 459 460 return 0; 461 } 462 463 static u32 ena_get_msglevel(struct net_device *netdev) 464 { 465 struct ena_adapter *adapter = netdev_priv(netdev); 466 467 return adapter->msg_enable; 468 } 469 470 static void ena_set_msglevel(struct net_device *netdev, u32 value) 471 { 472 struct ena_adapter *adapter = netdev_priv(netdev); 473 474 adapter->msg_enable = value; 475 } 476 477 static void ena_get_drvinfo(struct net_device *dev, 478 struct ethtool_drvinfo *info) 479 { 480 struct ena_adapter *adapter = netdev_priv(dev); 481 482 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 483 strlcpy(info->bus_info, pci_name(adapter->pdev), 484 sizeof(info->bus_info)); 485 } 486 487 static void ena_get_ringparam(struct net_device *netdev, 488 struct ethtool_ringparam *ring) 489 { 490 struct ena_adapter *adapter = netdev_priv(netdev); 491 492 ring->tx_max_pending = adapter->max_tx_ring_size; 493 ring->rx_max_pending = adapter->max_rx_ring_size; 494 ring->tx_pending = 
adapter->tx_ring[0].ring_size; 495 ring->rx_pending = adapter->rx_ring[0].ring_size; 496 } 497 498 static int ena_set_ringparam(struct net_device *netdev, 499 struct ethtool_ringparam *ring) 500 { 501 struct ena_adapter *adapter = netdev_priv(netdev); 502 u32 new_tx_size, new_rx_size; 503 504 new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ? 505 ENA_MIN_RING_SIZE : ring->tx_pending; 506 new_tx_size = rounddown_pow_of_two(new_tx_size); 507 508 new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ? 509 ENA_MIN_RING_SIZE : ring->rx_pending; 510 new_rx_size = rounddown_pow_of_two(new_rx_size); 511 512 if (new_tx_size == adapter->requested_tx_ring_size && 513 new_rx_size == adapter->requested_rx_ring_size) 514 return 0; 515 516 return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size); 517 } 518 519 static u32 ena_flow_hash_to_flow_type(u16 hash_fields) 520 { 521 u32 data = 0; 522 523 if (hash_fields & ENA_ADMIN_RSS_L2_DA) 524 data |= RXH_L2DA; 525 526 if (hash_fields & ENA_ADMIN_RSS_L3_DA) 527 data |= RXH_IP_DST; 528 529 if (hash_fields & ENA_ADMIN_RSS_L3_SA) 530 data |= RXH_IP_SRC; 531 532 if (hash_fields & ENA_ADMIN_RSS_L4_DP) 533 data |= RXH_L4_B_2_3; 534 535 if (hash_fields & ENA_ADMIN_RSS_L4_SP) 536 data |= RXH_L4_B_0_1; 537 538 return data; 539 } 540 541 static u16 ena_flow_data_to_flow_hash(u32 hash_fields) 542 { 543 u16 data = 0; 544 545 if (hash_fields & RXH_L2DA) 546 data |= ENA_ADMIN_RSS_L2_DA; 547 548 if (hash_fields & RXH_IP_DST) 549 data |= ENA_ADMIN_RSS_L3_DA; 550 551 if (hash_fields & RXH_IP_SRC) 552 data |= ENA_ADMIN_RSS_L3_SA; 553 554 if (hash_fields & RXH_L4_B_2_3) 555 data |= ENA_ADMIN_RSS_L4_DP; 556 557 if (hash_fields & RXH_L4_B_0_1) 558 data |= ENA_ADMIN_RSS_L4_SP; 559 560 return data; 561 } 562 563 static int ena_get_rss_hash(struct ena_com_dev *ena_dev, 564 struct ethtool_rxnfc *cmd) 565 { 566 enum ena_admin_flow_hash_proto proto; 567 u16 hash_fields; 568 int rc; 569 570 cmd->data = 0; 571 572 switch (cmd->flow_type) { 573 case 
TCP_V4_FLOW: 574 proto = ENA_ADMIN_RSS_TCP4; 575 break; 576 case UDP_V4_FLOW: 577 proto = ENA_ADMIN_RSS_UDP4; 578 break; 579 case TCP_V6_FLOW: 580 proto = ENA_ADMIN_RSS_TCP6; 581 break; 582 case UDP_V6_FLOW: 583 proto = ENA_ADMIN_RSS_UDP6; 584 break; 585 case IPV4_FLOW: 586 proto = ENA_ADMIN_RSS_IP4; 587 break; 588 case IPV6_FLOW: 589 proto = ENA_ADMIN_RSS_IP6; 590 break; 591 case ETHER_FLOW: 592 proto = ENA_ADMIN_RSS_NOT_IP; 593 break; 594 case AH_V4_FLOW: 595 case ESP_V4_FLOW: 596 case AH_V6_FLOW: 597 case ESP_V6_FLOW: 598 case SCTP_V4_FLOW: 599 case AH_ESP_V4_FLOW: 600 return -EOPNOTSUPP; 601 default: 602 return -EINVAL; 603 } 604 605 rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields); 606 if (rc) 607 return rc; 608 609 cmd->data = ena_flow_hash_to_flow_type(hash_fields); 610 611 return 0; 612 } 613 614 static int ena_set_rss_hash(struct ena_com_dev *ena_dev, 615 struct ethtool_rxnfc *cmd) 616 { 617 enum ena_admin_flow_hash_proto proto; 618 u16 hash_fields; 619 620 switch (cmd->flow_type) { 621 case TCP_V4_FLOW: 622 proto = ENA_ADMIN_RSS_TCP4; 623 break; 624 case UDP_V4_FLOW: 625 proto = ENA_ADMIN_RSS_UDP4; 626 break; 627 case TCP_V6_FLOW: 628 proto = ENA_ADMIN_RSS_TCP6; 629 break; 630 case UDP_V6_FLOW: 631 proto = ENA_ADMIN_RSS_UDP6; 632 break; 633 case IPV4_FLOW: 634 proto = ENA_ADMIN_RSS_IP4; 635 break; 636 case IPV6_FLOW: 637 proto = ENA_ADMIN_RSS_IP6; 638 break; 639 case ETHER_FLOW: 640 proto = ENA_ADMIN_RSS_NOT_IP; 641 break; 642 case AH_V4_FLOW: 643 case ESP_V4_FLOW: 644 case AH_V6_FLOW: 645 case ESP_V6_FLOW: 646 case SCTP_V4_FLOW: 647 case AH_ESP_V4_FLOW: 648 return -EOPNOTSUPP; 649 default: 650 return -EINVAL; 651 } 652 653 hash_fields = ena_flow_data_to_flow_hash(cmd->data); 654 655 return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields); 656 } 657 658 static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) 659 { 660 struct ena_adapter *adapter = netdev_priv(netdev); 661 int rc = 0; 662 663 switch (info->cmd) { 664 case 
ETHTOOL_SRXFH: 665 rc = ena_set_rss_hash(adapter->ena_dev, info); 666 break; 667 case ETHTOOL_SRXCLSRLDEL: 668 case ETHTOOL_SRXCLSRLINS: 669 default: 670 netif_err(adapter, drv, netdev, 671 "Command parameter %d is not supported\n", info->cmd); 672 rc = -EOPNOTSUPP; 673 } 674 675 return rc; 676 } 677 678 static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, 679 u32 *rules) 680 { 681 struct ena_adapter *adapter = netdev_priv(netdev); 682 int rc = 0; 683 684 switch (info->cmd) { 685 case ETHTOOL_GRXRINGS: 686 info->data = adapter->num_io_queues; 687 rc = 0; 688 break; 689 case ETHTOOL_GRXFH: 690 rc = ena_get_rss_hash(adapter->ena_dev, info); 691 break; 692 case ETHTOOL_GRXCLSRLCNT: 693 case ETHTOOL_GRXCLSRULE: 694 case ETHTOOL_GRXCLSRLALL: 695 default: 696 netif_err(adapter, drv, netdev, 697 "Command parameter %d is not supported\n", info->cmd); 698 rc = -EOPNOTSUPP; 699 } 700 701 return rc; 702 } 703 704 static u32 ena_get_rxfh_indir_size(struct net_device *netdev) 705 { 706 return ENA_RX_RSS_TABLE_SIZE; 707 } 708 709 static u32 ena_get_rxfh_key_size(struct net_device *netdev) 710 { 711 return ENA_HASH_KEY_SIZE; 712 } 713 714 static int ena_indirection_table_set(struct ena_adapter *adapter, 715 const u32 *indir) 716 { 717 struct ena_com_dev *ena_dev = adapter->ena_dev; 718 int i, rc; 719 720 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 721 rc = ena_com_indirect_table_fill_entry(ena_dev, 722 i, 723 ENA_IO_RXQ_IDX(indir[i])); 724 if (unlikely(rc)) { 725 netif_err(adapter, drv, adapter->netdev, 726 "Cannot fill indirect table (index is too large)\n"); 727 return rc; 728 } 729 } 730 731 rc = ena_com_indirect_table_set(ena_dev); 732 if (rc) { 733 netif_err(adapter, drv, adapter->netdev, 734 "Cannot set indirect table\n"); 735 return rc == -EPERM ? 
-EOPNOTSUPP : rc; 736 } 737 return rc; 738 } 739 740 static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) 741 { 742 struct ena_com_dev *ena_dev = adapter->ena_dev; 743 int i, rc; 744 745 if (!indir) 746 return 0; 747 748 rc = ena_com_indirect_table_get(ena_dev, indir); 749 if (rc) 750 return rc; 751 752 /* Our internal representation of the indices is: even indices 753 * for Tx and uneven indices for Rx. We need to convert the Rx 754 * indices to be consecutive 755 */ 756 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) 757 indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]); 758 759 return rc; 760 } 761 762 static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, 763 u8 *hfunc) 764 { 765 struct ena_adapter *adapter = netdev_priv(netdev); 766 enum ena_admin_hash_functions ena_func; 767 u8 func; 768 int rc; 769 770 rc = ena_indirection_table_get(adapter, indir); 771 if (rc) 772 return rc; 773 774 /* We call this function in order to check if the device 775 * supports getting/setting the hash function. 
776 */ 777 rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func); 778 if (rc) { 779 if (rc == -EOPNOTSUPP) 780 rc = 0; 781 782 return rc; 783 } 784 785 rc = ena_com_get_hash_key(adapter->ena_dev, key); 786 if (rc) 787 return rc; 788 789 switch (ena_func) { 790 case ENA_ADMIN_TOEPLITZ: 791 func = ETH_RSS_HASH_TOP; 792 break; 793 case ENA_ADMIN_CRC32: 794 func = ETH_RSS_HASH_CRC32; 795 break; 796 default: 797 netif_err(adapter, drv, netdev, 798 "Command parameter is not supported\n"); 799 return -EOPNOTSUPP; 800 } 801 802 if (hfunc) 803 *hfunc = func; 804 805 return 0; 806 } 807 808 static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, 809 const u8 *key, const u8 hfunc) 810 { 811 struct ena_adapter *adapter = netdev_priv(netdev); 812 struct ena_com_dev *ena_dev = adapter->ena_dev; 813 enum ena_admin_hash_functions func = 0; 814 int rc; 815 816 if (indir) { 817 rc = ena_indirection_table_set(adapter, indir); 818 if (rc) 819 return rc; 820 } 821 822 switch (hfunc) { 823 case ETH_RSS_HASH_NO_CHANGE: 824 func = ena_com_get_current_hash_function(ena_dev); 825 break; 826 case ETH_RSS_HASH_TOP: 827 func = ENA_ADMIN_TOEPLITZ; 828 break; 829 case ETH_RSS_HASH_CRC32: 830 func = ENA_ADMIN_CRC32; 831 break; 832 default: 833 netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n", 834 hfunc); 835 return -EOPNOTSUPP; 836 } 837 838 if (key || func) { 839 rc = ena_com_fill_hash_function(ena_dev, func, key, 840 ENA_HASH_KEY_SIZE, 841 0xFFFFFFFF); 842 if (unlikely(rc)) { 843 netif_err(adapter, drv, netdev, "Cannot fill key\n"); 844 return rc == -EPERM ? 
-EOPNOTSUPP : rc; 845 } 846 } 847 848 return 0; 849 } 850 851 static void ena_get_channels(struct net_device *netdev, 852 struct ethtool_channels *channels) 853 { 854 struct ena_adapter *adapter = netdev_priv(netdev); 855 856 channels->max_combined = adapter->max_num_io_queues; 857 channels->combined_count = adapter->num_io_queues; 858 } 859 860 static int ena_set_channels(struct net_device *netdev, 861 struct ethtool_channels *channels) 862 { 863 struct ena_adapter *adapter = netdev_priv(netdev); 864 u32 count = channels->combined_count; 865 /* The check for max value is already done in ethtool */ 866 if (count < ENA_MIN_NUM_IO_QUEUES || 867 (ena_xdp_present(adapter) && 868 !ena_xdp_legal_queue_count(adapter, channels->combined_count))) 869 return -EINVAL; 870 871 return ena_update_queue_count(adapter, count); 872 } 873 874 static int ena_get_tunable(struct net_device *netdev, 875 const struct ethtool_tunable *tuna, void *data) 876 { 877 struct ena_adapter *adapter = netdev_priv(netdev); 878 int ret = 0; 879 880 switch (tuna->id) { 881 case ETHTOOL_RX_COPYBREAK: 882 *(u32 *)data = adapter->rx_copybreak; 883 break; 884 default: 885 ret = -EINVAL; 886 break; 887 } 888 889 return ret; 890 } 891 892 static int ena_set_tunable(struct net_device *netdev, 893 const struct ethtool_tunable *tuna, 894 const void *data) 895 { 896 struct ena_adapter *adapter = netdev_priv(netdev); 897 int ret = 0; 898 u32 len; 899 900 switch (tuna->id) { 901 case ETHTOOL_RX_COPYBREAK: 902 len = *(u32 *)data; 903 if (len > adapter->netdev->mtu) { 904 ret = -EINVAL; 905 break; 906 } 907 adapter->rx_copybreak = len; 908 break; 909 default: 910 ret = -EINVAL; 911 break; 912 } 913 914 return ret; 915 } 916 917 static const struct ethtool_ops ena_ethtool_ops = { 918 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 919 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 920 .get_link_ksettings = ena_get_link_ksettings, 921 .get_drvinfo = ena_get_drvinfo, 922 .get_msglevel = ena_get_msglevel, 923 .set_msglevel = 
ena_set_msglevel, 924 .get_link = ethtool_op_get_link, 925 .get_coalesce = ena_get_coalesce, 926 .set_coalesce = ena_set_coalesce, 927 .get_ringparam = ena_get_ringparam, 928 .set_ringparam = ena_set_ringparam, 929 .get_sset_count = ena_get_sset_count, 930 .get_strings = ena_get_ethtool_strings, 931 .get_ethtool_stats = ena_get_ethtool_stats, 932 .get_rxnfc = ena_get_rxnfc, 933 .set_rxnfc = ena_set_rxnfc, 934 .get_rxfh_indir_size = ena_get_rxfh_indir_size, 935 .get_rxfh_key_size = ena_get_rxfh_key_size, 936 .get_rxfh = ena_get_rxfh, 937 .set_rxfh = ena_set_rxfh, 938 .get_channels = ena_get_channels, 939 .set_channels = ena_set_channels, 940 .get_tunable = ena_get_tunable, 941 .set_tunable = ena_set_tunable, 942 .get_ts_info = ethtool_op_get_ts_info, 943 }; 944 945 void ena_set_ethtool_ops(struct net_device *netdev) 946 { 947 netdev->ethtool_ops = &ena_ethtool_ops; 948 } 949 950 static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf) 951 { 952 struct net_device *netdev = adapter->netdev; 953 u8 *strings_buf; 954 u64 *data_buf; 955 int strings_num; 956 int i, rc; 957 958 strings_num = ena_get_sw_stats_count(adapter); 959 if (strings_num <= 0) { 960 netif_err(adapter, drv, netdev, "Can't get stats num\n"); 961 return; 962 } 963 964 strings_buf = devm_kcalloc(&adapter->pdev->dev, 965 ETH_GSTRING_LEN, strings_num, 966 GFP_ATOMIC); 967 if (!strings_buf) { 968 netif_err(adapter, drv, netdev, 969 "failed to alloc strings_buf\n"); 970 return; 971 } 972 973 data_buf = devm_kcalloc(&adapter->pdev->dev, 974 strings_num, sizeof(u64), 975 GFP_ATOMIC); 976 if (!data_buf) { 977 netif_err(adapter, drv, netdev, 978 "Failed to allocate data buf\n"); 979 devm_kfree(&adapter->pdev->dev, strings_buf); 980 return; 981 } 982 983 ena_get_strings(adapter, strings_buf, false); 984 ena_get_stats(adapter, data_buf, false); 985 986 /* If there is a buffer, dump stats, otherwise print them to dmesg */ 987 if (buf) 988 for (i = 0; i < strings_num; i++) { 989 rc = snprintf(buf, 
ETH_GSTRING_LEN + sizeof(u64), 990 "%s %llu\n", 991 strings_buf + i * ETH_GSTRING_LEN, 992 data_buf[i]); 993 buf += rc; 994 } 995 else 996 for (i = 0; i < strings_num; i++) 997 netif_err(adapter, drv, netdev, "%s: %llu\n", 998 strings_buf + i * ETH_GSTRING_LEN, 999 data_buf[i]); 1000 1001 devm_kfree(&adapter->pdev->dev, strings_buf); 1002 devm_kfree(&adapter->pdev->dev, data_buf); 1003 } 1004 1005 void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf) 1006 { 1007 if (!buf) 1008 return; 1009 1010 ena_dump_stats_ex(adapter, buf); 1011 } 1012 1013 void ena_dump_stats_to_dmesg(struct ena_adapter *adapter) 1014 { 1015 ena_dump_stats_ex(adapter, NULL); 1016 } 1017