/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ena_netdev.h"

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(suspend),
	ENA_STAT_GLOBAL_ENTRY(resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(napi_comp),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(llq_buffer_copy),
	ENA_STAT_TX_ENTRY(missed_tx),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
	ENA_STAT_RX_ENTRY(csum_good),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
	ENA_STAT_RX_ENTRY(empty_rx_ring),
	ENA_STAT_RX_ENTRY(csum_unchecked),
};

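/* Admin (ena_com) queue statistics, read straight from the ena_com
 * admin queue bookkeeping and reported as "ena_admin_q_<name>".
 */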
static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)

static void ena_safe_update_stat(u64 *src, u64 *dst,
				 struct u64_stats_sync *syncp)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*(dst) = *src;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}

static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	struct ena_ring *ring;

	u64 *ptr;
	int i, j;

	for (i = 0; i < adapter->num_queues; i++) {
		/* Tx stats */
		ring = &adapter->tx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			ptr = (u64 *)((uintptr_t)&ring->tx_stats +
				(uintptr_t)ena_stats->stat_offset);

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}

		/* Rx stats */
		ring = &adapter->rx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
			ena_stats = &ena_stats_rx_strings[j];

			ptr = (u64 *)((uintptr_t)&ring->rx_stats +
				(uintptr_t)ena_stats->stat_offset);

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}
	}
}

static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	u32 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
			(uintptr_t)ena_stats->stat_offset);

		*(*data)++ = *ptr;
	}
}

static void ena_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	const struct ena_stats *ena_stats;
	u64 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];

		ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
			(uintptr_t)ena_stats->stat_offset);

		ena_safe_update_stat(ptr, data++, &adapter->syncp);
	}

	ena_queue_stats(adapter, &data);
	ena_dev_admin_queue_stats(adapter, &data);
}

int ena_get_sset_count(struct net_device *netdev, int sset)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
		+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

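/* Per-queue stat names: one "queue_<i>_tx_<stat>" / "queue_<i>_rx_<stat>"
 * string per ring counter (e.g. "queue_0_tx_cnt"), in the same order in
 * which ena_queue_stats() fills in the values.
 */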
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
	const struct ena_stats *ena_stats;
	int i, j;

	for (i = 0; i < adapter->num_queues; i++) {
		/* Tx stats */
		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			snprintf(*data, ETH_GSTRING_LEN,
				 "queue_%u_tx_%s", i, ena_stats->name);
			(*data) += ETH_GSTRING_LEN;
		}
		/* Rx stats */
		for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
			ena_stats = &ena_stats_rx_strings[j];

			snprintf(*data, ETH_GSTRING_LEN,
				 "queue_%u_rx_%s", i, ena_stats->name);
			(*data) += ETH_GSTRING_LEN;
		}
	}
}

static void ena_com_dev_strings(u8 **data)
{
	const struct ena_stats *ena_stats;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		snprintf(*data, ETH_GSTRING_LEN,
			 "ena_admin_q_%s", ena_stats->name);
		(*data) += ETH_GSTRING_LEN;
	}
}

static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	const struct ena_stats *ena_stats;
	int i;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];

		memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	ena_queue_strings(adapter, &data);
	ena_com_dev_strings(&data);
}

static int ena_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_admin_get_feature_link_desc *link;
	struct ena_admin_get_feat_resp feat_resp;
	int rc;

	rc = ena_com_get_link_params(ena_dev, &feat_resp);
	if (rc)
		return rc;

	link = &feat_resp.u.link;
	link_ksettings->base.speed = link->speed;

	if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);
	}

	link_ksettings->base.autoneg =
		(link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	link_ksettings->base.duplex = DUPLEX_FULL;

	return 0;
}

static int ena_get_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_intr_moder_entry intr_moder_entry;

	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
		/* the device doesn't support interrupt moderation */
		return -EOPNOTSUPP;
	}

	coalesce->tx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) /
			ena_dev->intr_delay_resolution;

	if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) {
		coalesce->rx_coalesce_usecs =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
			/ ena_dev->intr_delay_resolution;
	} else {
		ena_com_get_intr_moderation_entry(adapter->ena_dev,
						  ENA_INTR_MODER_LOWEST,
						  &intr_moder_entry);
		coalesce->rx_coalesce_usecs_low = intr_moder_entry.intr_moder_interval;
		coalesce->rx_max_coalesced_frames_low = intr_moder_entry.pkts_per_interval;

		ena_com_get_intr_moderation_entry(adapter->ena_dev,
						  ENA_INTR_MODER_MID,
						  &intr_moder_entry);
		coalesce->rx_coalesce_usecs = intr_moder_entry.intr_moder_interval;
		coalesce->rx_max_coalesced_frames = intr_moder_entry.pkts_per_interval;

		ena_com_get_intr_moderation_entry(adapter->ena_dev,
						  ENA_INTR_MODER_HIGHEST,
						  &intr_moder_entry);
		coalesce->rx_coalesce_usecs_high = intr_moder_entry.intr_moder_interval;
		coalesce->rx_max_coalesced_frames_high = intr_moder_entry.pkts_per_interval;
	}

	coalesce->use_adaptive_rx_coalesce =
		ena_com_get_adaptive_moderation_enabled(ena_dev);

	return 0;
}

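/* Push the current non-adaptive TX moderation interval into the
 * smoothed_interval field of every TX ring.
 */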
static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);

	for (i = 0; i < adapter->num_queues; i++)
		adapter->tx_ring[i].smoothed_interval = val;
}

static int ena_set_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_intr_moder_entry intr_moder_entry;
	int rc;

	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
		/* the device doesn't support interrupt moderation */
		return -EOPNOTSUPP;
	}

	if (coalesce->rx_coalesce_usecs_irq ||
	    coalesce->rx_max_coalesced_frames_irq ||
	    coalesce->tx_coalesce_usecs_irq ||
	    coalesce->tx_max_coalesced_frames ||
	    coalesce->tx_max_coalesced_frames_irq ||
	    coalesce->stats_block_coalesce_usecs ||
	    coalesce->use_adaptive_tx_coalesce ||
	    coalesce->pkt_rate_low ||
	    coalesce->tx_coalesce_usecs_low ||
	    coalesce->tx_max_coalesced_frames_low ||
	    coalesce->pkt_rate_high ||
	    coalesce->tx_coalesce_usecs_high ||
	    coalesce->tx_max_coalesced_frames_high ||
	    coalesce->rate_sample_interval)
		return -EINVAL;

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
							       coalesce->tx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_tx_rings_intr_moderation(adapter);

	if (ena_com_get_adaptive_moderation_enabled(ena_dev)) {
		if (!coalesce->use_adaptive_rx_coalesce) {
			ena_com_disable_adaptive_moderation(ena_dev);
			rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
									       coalesce->rx_coalesce_usecs);
			return rc;
		}
	} else { /* was in non-adaptive mode */
		if (coalesce->use_adaptive_rx_coalesce) {
			ena_com_enable_adaptive_moderation(ena_dev);
		} else {
			rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
									       coalesce->rx_coalesce_usecs);
			return rc;
		}
	}

	intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_low;
	intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_low;
	intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
	ena_com_init_intr_moderation_entry(adapter->ena_dev,
					   ENA_INTR_MODER_LOWEST,
					   &intr_moder_entry);

	intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs;
	intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames;
	intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
	ena_com_init_intr_moderation_entry(adapter->ena_dev,
					   ENA_INTR_MODER_MID,
					   &intr_moder_entry);

	intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_high;
	intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_high;
	intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
	ena_com_init_intr_moderation_entry(adapter->ena_dev,
					   ENA_INTR_MODER_HIGHEST,
					   &intr_moder_entry);

	return 0;
}

static u32 ena_get_msglevel(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ena_set_msglevel(struct net_device *netdev, u32 value)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = value;
}

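/* ethtool -i: driver name, driver version and PCI bus info */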
static void ena_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static void ena_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	ring->tx_max_pending = adapter->max_tx_ring_size;
	ring->rx_max_pending = adapter->max_rx_ring_size;
	ring->tx_pending = adapter->tx_ring[0].ring_size;
	ring->rx_pending = adapter->rx_ring[0].ring_size;
}

static int ena_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_size, new_rx_size;

	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->tx_pending;
	new_tx_size = rounddown_pow_of_two(new_tx_size);

	new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->rx_pending;
	new_rx_size = rounddown_pow_of_two(new_rx_size);

	if (new_tx_size == adapter->requested_tx_ring_size &&
	    new_rx_size == adapter->requested_rx_ring_size)
		return 0;

	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
}

static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
{
	u32 data = 0;

	if (hash_fields & ENA_ADMIN_RSS_L2_DA)
		data |= RXH_L2DA;

	if (hash_fields & ENA_ADMIN_RSS_L3_DA)
		data |= RXH_IP_DST;

	if (hash_fields & ENA_ADMIN_RSS_L3_SA)
		data |= RXH_IP_SRC;

	if (hash_fields & ENA_ADMIN_RSS_L4_DP)
		data |= RXH_L4_B_2_3;

	if (hash_fields & ENA_ADMIN_RSS_L4_SP)
		data |= RXH_L4_B_0_1;

	return data;
}

static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
{
	u16 data = 0;

	if (hash_fields & RXH_L2DA)
		data |= ENA_ADMIN_RSS_L2_DA;

	if (hash_fields & RXH_IP_DST)
		data |= ENA_ADMIN_RSS_L3_DA;

	if (hash_fields & RXH_IP_SRC)
		data |= ENA_ADMIN_RSS_L3_SA;

	if (hash_fields & RXH_L4_B_2_3)
		data |= ENA_ADMIN_RSS_L4_DP;

	if (hash_fields & RXH_L4_B_0_1)
		data |= ENA_ADMIN_RSS_L4_SP;

	return data;
}

static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;
	int rc;

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
	if (rc)
		return rc;

	cmd->data = ena_flow_hash_to_flow_type(hash_fields);

	return 0;
}

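/* ETHTOOL_SRXFH: translate the requested RXH_* fields into ENA flow
 * hash fields and configure them for the given flow type.
 */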
static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	hash_fields = ena_flow_data_to_flow_hash(cmd->data);

	return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}

static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		rc = ena_set_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
			 u32 *rules)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_queues;
		rc = 0;
		break;
	case ETHTOOL_GRXFH:
		rc = ena_get_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
{
	return ENA_RX_RSS_TABLE_SIZE;
}

static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
	return ENA_HASH_KEY_SIZE;
}

static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	enum ena_admin_hash_functions ena_func;
	u8 func;
	int rc;

	rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
	if (rc)
		return rc;

	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
	if (rc)
		return rc;

	switch (ena_func) {
	case ENA_ADMIN_TOEPLITZ:
		func = ETH_RSS_HASH_TOP;
		break;
	case ENA_ADMIN_CRC32:
		func = ETH_RSS_HASH_XOR;
		break;
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter is not supported\n");
		return -EOPNOTSUPP;
	}

	if (hfunc)
		*hfunc = func;

	return rc;
}

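/* ETHTOOL_SRSSH: program the RSS indirection table, hash function and
 * hash key; -EPERM from the device is reported as -EOPNOTSUPP.
 */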
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	enum ena_admin_hash_functions func;
	int rc, i;

	if (indir) {
		for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       ENA_IO_RXQ_IDX(indir[i]));
			if (unlikely(rc)) {
				netif_err(adapter, drv, netdev,
					  "Cannot fill indirect table (index is too large)\n");
				return rc;
			}
		}

		rc = ena_com_indirect_table_set(ena_dev);
		if (rc) {
			netif_err(adapter, drv, netdev,
				  "Cannot set indirect table\n");
			return rc == -EPERM ? -EOPNOTSUPP : rc;
		}
	}

	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		func = ENA_ADMIN_TOEPLITZ;
		break;
	case ETH_RSS_HASH_XOR:
		func = ENA_ADMIN_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
			  hfunc);
		return -EOPNOTSUPP;
	}

	if (key) {
		rc = ena_com_fill_hash_function(ena_dev, func, key,
						ENA_HASH_KEY_SIZE,
						0xFFFFFFFF);
		if (unlikely(rc)) {
			netif_err(adapter, drv, netdev, "Cannot fill key\n");
			return rc == -EPERM ? -EOPNOTSUPP : rc;
		}
	}

	return 0;
}

static void ena_get_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	channels->max_rx = adapter->num_queues;
	channels->max_tx = adapter->num_queues;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->num_queues;
	channels->tx_count = adapter->num_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ena_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna, void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = adapter->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ena_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna,
			   const void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;
	u32 len;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		len = *(u32 *)data;
		if (len > adapter->netdev->mtu) {
			ret = -EINVAL;
			break;
		}
		adapter->rx_copybreak = len;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops ena_ethtool_ops = {
	.get_link_ksettings	= ena_get_link_ksettings,
	.get_drvinfo		= ena_get_drvinfo,
	.get_msglevel		= ena_get_msglevel,
	.set_msglevel		= ena_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= ena_get_coalesce,
	.set_coalesce		= ena_set_coalesce,
	.get_ringparam		= ena_get_ringparam,
	.set_ringparam		= ena_set_ringparam,
	.get_sset_count		= ena_get_sset_count,
	.get_strings		= ena_get_strings,
	.get_ethtool_stats	= ena_get_ethtool_stats,
	.get_rxnfc		= ena_get_rxnfc,
	.set_rxnfc		= ena_set_rxnfc,
	.get_rxfh_indir_size	= ena_get_rxfh_indir_size,
	.get_rxfh_key_size	= ena_get_rxfh_key_size,
	.get_rxfh		= ena_get_rxfh,
	.set_rxfh		= ena_set_rxfh,
	.get_channels		= ena_get_channels,
	.get_tunable		= ena_get_tunable,
	.set_tunable		= ena_set_tunable,
};

void ena_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ena_ethtool_ops;
}

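/* Gather all ethtool stat strings and values and either format them as
 * "<name> <value>" lines into @buf or, if @buf is NULL, print them to
 * the kernel log.
 */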
static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
{
	struct net_device *netdev = adapter->netdev;
	u8 *strings_buf;
	u64 *data_buf;
	int strings_num;
	int i, rc;

	strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
	if (strings_num <= 0) {
		netif_err(adapter, drv, netdev, "Can't get stats num\n");
		return;
	}

	strings_buf = devm_kcalloc(&adapter->pdev->dev,
				   ETH_GSTRING_LEN, strings_num,
				   GFP_ATOMIC);
	if (!strings_buf) {
		netif_err(adapter, drv, netdev,
			  "failed to alloc strings_buf\n");
		return;
	}

	data_buf = devm_kcalloc(&adapter->pdev->dev,
				strings_num, sizeof(u64),
				GFP_ATOMIC);
	if (!data_buf) {
		netif_err(adapter, drv, netdev,
			  "failed to allocate data buf\n");
		devm_kfree(&adapter->pdev->dev, strings_buf);
		return;
	}

	ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
	ena_get_ethtool_stats(netdev, NULL, data_buf);

	/* If there is a buffer, dump stats, otherwise print them to dmesg */
	if (buf)
		for (i = 0; i < strings_num; i++) {
			rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
				      "%s %llu\n",
				      strings_buf + i * ETH_GSTRING_LEN,
				      data_buf[i]);
			buf += rc;
		}
	else
		for (i = 0; i < strings_num; i++)
			netif_err(adapter, drv, netdev, "%s: %llu\n",
				  strings_buf + i * ETH_GSTRING_LEN,
				  data_buf[i]);

	devm_kfree(&adapter->pdev->dev, strings_buf);
	devm_kfree(&adapter->pdev->dev, data_buf);
}

void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
{
	if (!buf)
		return;

	ena_dump_stats_ex(adapter, buf);
}

void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
{
	ena_dump_stats_ex(adapter, NULL);
}