// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/ethtool.h>
#include <linux/vmalloc.h>

#include "fm10k.h"

struct fm10k_stats {
	/* The stat_string is expected to be a format string formatted using
	 * vsnprintf by fm10k_add_stat_strings. Every member of a stats array
	 * should use the same format specifiers as they will be formatted
	 * using the same variadic arguments.
	 */
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define FM10K_STAT_FIELDS(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* netdevice statistics */
#define FM10K_NETDEV_STAT(_net_stat) \
	FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \
			  _net_stat)

static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
	FM10K_NETDEV_STAT(tx_packets),
	FM10K_NETDEV_STAT(tx_bytes),
	FM10K_NETDEV_STAT(tx_errors),
	FM10K_NETDEV_STAT(rx_packets),
	FM10K_NETDEV_STAT(rx_bytes),
	FM10K_NETDEV_STAT(rx_errors),
	FM10K_NETDEV_STAT(rx_dropped),

	/* detailed Rx errors */
	FM10K_NETDEV_STAT(rx_length_errors),
	FM10K_NETDEV_STAT(rx_crc_errors),
	FM10K_NETDEV_STAT(rx_fifo_errors),
};

#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)

/* General interface statistics */
#define FM10K_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
	FM10K_STAT("tx_restart_queue", restart_queue),
	FM10K_STAT("tx_busy", tx_busy),
	FM10K_STAT("tx_csum_errors", tx_csum_errors),
	FM10K_STAT("rx_alloc_failed", alloc_failed),
	FM10K_STAT("rx_csum_errors", rx_csum_errors),

	FM10K_STAT("tx_packets_nic", tx_packets_nic),
	FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
	FM10K_STAT("rx_packets_nic", rx_packets_nic),
	FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
	FM10K_STAT("rx_drops_nic", rx_drops_nic),
	FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
	FM10K_STAT("rx_overrun_vf", rx_overrun_vf),

	FM10K_STAT("swapi_status", hw.swapi.status),
	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),

	FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),

	FM10K_STAT("tx_hang_count", tx_timeout_count),
};

static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
	FM10K_STAT("timeout", stats.timeout.count),
	FM10K_STAT("ur", stats.ur.count),
	FM10K_STAT("ca", stats.ca.count),
	FM10K_STAT("um", stats.um.count),
	FM10K_STAT("xec", stats.xec.count),
	FM10K_STAT("vlan_drop", stats.vlan_drop.count),
	FM10K_STAT("loopback_drop", stats.loopback_drop.count),
	FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};

/* mailbox statistics */
#define FM10K_MBX_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
	FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
	FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped),
	FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
	FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
	FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled),
	FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
	FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
	FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
	FM10K_MBX_STAT("mbx_rx_mbmem_pushed",
		       rx_mbmem_pushed),
};

/* per-queue ring statistics */
#define FM10K_QUEUE_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
	FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets),
	FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes),
};

#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)

#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
				FM10K_NETDEV_STATS_LEN + \
				FM10K_MBX_STATS_LEN)

static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
	"Mailbox test (on/offline)"
};

#define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN)

enum fm10k_self_test_types {
	FM10K_TEST_MBX,
	FM10K_TEST_MAX = FM10K_TEST_LEN
};

enum {
	FM10K_PRV_FLAG_LEN,
};

static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};

static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
				     const unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		va_start(args, size);
		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
		*p += ETH_GSTRING_LEN;
		va_end(args);
	}
}

#define fm10k_add_stat_strings(p, stats, ...) \
	__fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)

static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int i;

	fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf)
		fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats);

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "tx", i);

		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "rx", i);
	}
}

static void fm10k_get_strings(struct net_device *dev,
			      u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, fm10k_gstrings_test,
		       FM10K_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		fm10k_get_stat_strings(dev, data);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, fm10k_prv_flags,
		       FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
		break;
	}
}

static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int stats_len = FM10K_STATIC_STATS_LEN;

	switch (sset) {
	case ETH_SS_TEST:
		return FM10K_TEST_LEN;
	case ETH_SS_STATS:
		stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;

		if (hw->mac.type != fm10k_mac_vf)
			stats_len += FM10K_PF_STATS_LEN;

		return stats_len;
	case ETH_SS_PRIV_FLAGS:
		return FM10K_PRV_FLAG_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
				      const struct fm10k_stats stats[],
				      const unsigned int size)
{
	unsigned int i;

	if (!pointer) {
		/* memory is not zero allocated so we have to clear it */
		for (i = 0; i < size; i++)
			*((*data)++) = 0;
		return;
	}

	for (i = 0; i < size; i++) {
		char *p = (char *)pointer + stats[i].stat_offset;

		switch (stats[i].sizeof_stat) {
		case sizeof(u64):
			*((*data)++) = *(u64 *)p;
			break;
		case sizeof(u32):
			*((*data)++) = *(u32 *)p;
			break;
		case sizeof(u16):
			*((*data)++) = *(u16 *)p;
			break;
		case sizeof(u8):
			*((*data)++) = *(u8 *)p;
			break;
		default:
			WARN_ONCE(1, "unexpected stat size for %s",
				  stats[i].stat_string);
			*((*data)++) = 0;
		}
	}
}

#define fm10k_add_ethtool_stats(data, pointer, stats) \
	__fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))

static void fm10k_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats __always_unused *stats,
				    u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	int i;

	fm10k_update_stats(interface);

	fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats);

	fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats);

	fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
				fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf) {
		fm10k_add_ethtool_stats(&data, interface,
					fm10k_gstrings_pf_stats);
	}

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		struct fm10k_ring *ring;

		ring = interface->tx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);

		ring = interface->rx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);
	}
}

/* If function below adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_Q 29

static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0;

	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
	buff[idx++] = fm10k_read_reg(hw,
				     FM10K_TX_SGLORT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));

	BUG_ON(idx != FM10K_REGS_LEN_Q);
}

/* If function below adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_VSI 43

static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0, j;

	buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
	for (j = 0; j < 10; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
	for (j = 0; j < 32; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));

	BUG_ON(idx != FM10K_REGS_LEN_VSI);
}

static void fm10k_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u32 *buff = p;
	u16 i;

	regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		/* General PF Registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);

		for (i = 0; i < 8; i++) {
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
		}

		for (i = 0; i < 65; i++) {
			fm10k_get_reg_vsi(hw, buff, i);
			buff += FM10K_REGS_LEN_VSI;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);

		for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
			fm10k_get_reg_q(hw, buff, i);
			buff += FM10K_REGS_LEN_Q;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);

		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));

		/* Interrupt Throttling Registers */
		for (i = 0; i < 130; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));

		break;
	case fm10k_mac_vf:
		/* General VF registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME);

		/* Interrupt Throttling Registers */
		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i));

		fm10k_get_reg_vsi(hw, buff, 0);
		buff += FM10K_REGS_LEN_VSI;

		for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) {
			if (i < hw->mac.max_queues)
				fm10k_get_reg_q(hw, buff, i);
			else
				memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q);
			buff += FM10K_REGS_LEN_Q;
		}

		break;
	default:
		return;
	}
}

/* If function above adds more registers these defines need to be updated */
#define FM10K_REGS_LEN_PF \
(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))
#define FM10K_REGS_LEN_VF \
(11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q))

static int fm10k_get_regs_len(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		return FM10K_REGS_LEN_PF * sizeof(u32);
	case fm10k_mac_vf:
		return FM10K_REGS_LEN_VF * sizeof(u32);
	default:
		return 0;
	}
}

static void fm10k_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	strncpy(info->driver, fm10k_driver_name,
		sizeof(info->driver) - 1);
	strncpy(info->bus_info, pci_name(interface->pdev),
		sizeof(info->bus_info) - 1);
}

static void fm10k_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* record fixed values for autoneg and tx pause */
	pause->autoneg = 0;
	pause->tx_pause = 1;

	pause->rx_pause = interface->rx_pause ? 1 : 0;
}

static int fm10k_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	if (pause->autoneg || !pause->tx_pause)
		return -EINVAL;

	/* we can only support pause on the PF to avoid head-of-line blocking */
	if (hw->mac.type == fm10k_mac_pf)
		interface->rx_pause = pause->rx_pause ? ~0 : 0;
	else if (pause->rx_pause)
		return -EINVAL;

	if (netif_running(dev))
		fm10k_update_rx_drop_en(interface);

	return 0;
}

static u32 fm10k_get_msglevel(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	return interface->msg_enable;
}

static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	interface->msg_enable = data;
}

static void fm10k_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	ring->rx_max_pending = FM10K_MAX_RXD;
	ring->tx_max_pending = FM10K_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = interface->rx_ring_count;
	ring->tx_pending = interface->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int fm10k_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       FM10K_MIN_TXD, FM10K_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       FM10K_MIN_RXD, FM10K_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == interface->tx_ring_count) &&
	    (new_rx_count == interface->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__FM10K_RESETTING, interface->state))
		usleep_range(1000, 2000);

	if (!netif_running(interface->netdev)) {
		for (i = 0; i < interface->num_tx_queues; i++)
			interface->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < interface->num_rx_queues; i++)
			interface->rx_ring[i]->count = new_rx_count;
		interface->tx_ring_count = new_tx_count;
		interface->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	fm10k_down(interface);

	/* Set up new Tx resources and free the old Tx resources in that
	 * order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != interface->tx_ring_count) {
		for (i = 0; i < interface->num_tx_queues; i++) {
			memcpy(&temp_ring[i], interface->tx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_tx_count;
			err = fm10k_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_tx_queues; i++) {
			fm10k_free_tx_resources(interface->tx_ring[i]);

			memcpy(interface->tx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != interface->rx_ring_count) {
		for (i = 0; i < interface->num_rx_queues; i++) {
			memcpy(&temp_ring[i], interface->rx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_rx_count;
			err = fm10k_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_rx_queues; i++) {
			fm10k_free_rx_resources(interface->rx_ring[i]);

			memcpy(interface->rx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->rx_ring_count = new_rx_count;
	}

err_setup:
	fm10k_up(interface);
	vfree(temp_ring);
clear_reset:
	clear_bit(__FM10K_RESETTING, interface->state);
	return err;
}

static int fm10k_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr);
	ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;

	ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr);
	ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;

	return 0;
}

static int fm10k_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 tx_itr, rx_itr;
	int i;

	/* verify limits */
	if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
	    (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
		return -EINVAL;

	/* record settings */
	tx_itr = ec->tx_coalesce_usecs;
	rx_itr = ec->rx_coalesce_usecs;

	/* set initial values for adaptive ITR */
	if (ec->use_adaptive_tx_coalesce)
		tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT;

	if (ec->use_adaptive_rx_coalesce)
		rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* update interface */
	interface->tx_itr = tx_itr;
	interface->rx_itr = rx_itr;

	/* update q_vectors */
	for (i = 0; i < interface->num_q_vectors; i++) {
		struct fm10k_q_vector *qv = interface->q_vector[i];

		qv->tx.itr = tx_itr;
		qv->rx.itr = rx_itr;
	}

	return 0;
}

static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for
	 * RSS on fm10k */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case UDP_V4_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 __always_unused *rule_locs)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = interface->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fm10k_get_rss_hash_opts(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
				  struct ethtool_rxnfc *nfc)
{
	int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				    interface->flags);
	int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				    interface->flags);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* If something changed we need to update the MRQC register. Note that
	 * test_bit() is guaranteed to return strictly 0 or 1, so testing for
	 * equality is safe.
	 */
	if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				      interface->flags)) ||
	    (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				      interface->flags))) {
		struct fm10k_hw *hw = &interface->hw;
		bool warn = false;
		u32 mrqc;

		/* Perform hash on these packet types */
		mrqc = FM10K_MRQC_IPV4 |
		       FM10K_MRQC_TCP_IPV4 |
		       FM10K_MRQC_IPV6 |
		       FM10K_MRQC_TCP_IPV6;

		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV4;
			warn = true;
		}
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV6;
			warn = true;
		}

		/* If we enable UDP RSS, display a warning that this may cause
		 * fragmented UDP packets to arrive out of order.
		 */
		if (warn)
			netif_warn(interface, drv, interface->netdev,
				   "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
	}

	return 0;
}

static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = fm10k_set_rss_hash_opt(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 attr_flag, test_msg[6];
	unsigned long timeout;
	int err = -EINVAL;

	/* For now this is a VF-only feature */
	if (hw->mac.type != fm10k_mac_vf)
		return 0;

	/* loop through both nested and unnested attribute types */
	for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
	     attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
	     attr_flag += attr_flag) {
		/* generate message to be tested */
		fm10k_tlv_msg_test_create(test_msg, attr_flag);

		fm10k_mbx_lock(interface);
		mbx->test_result = FM10K_NOT_IMPLEMENTED;
		err = mbx->ops.enqueue_tx(hw, mbx, test_msg);
		fm10k_mbx_unlock(interface);

		/* wait up to 1 second for response */
		timeout = jiffies + HZ;
		do {
			if (err < 0)
				goto err_out;

			usleep_range(500, 1000);

			fm10k_mbx_lock(interface);
			mbx->ops.process(hw, mbx);
			fm10k_mbx_unlock(interface);

			err = mbx->test_result;
			if (!err)
				break;
		} while (time_is_after_jiffies(timeout));

		/* reporting errors */
		if (err)
			goto err_out;
	}

err_out:
	*data = err < 0 ?
		(attr_flag) : (err > 0);
	return err;
}

static void fm10k_self_test(struct net_device *dev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);

	if (FM10K_REMOVED(hw->hw_addr)) {
		netif_err(interface, drv, dev,
			  "Interface removed - test blocked\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
}

static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
	return 0;
}

static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
		return -EINVAL;

	return 0;
}

static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}

void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
{
	u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
	struct fm10k_hw *hw = &interface->hw;
	u32 table[4];
	int i, j;

	/* record entries to reta table */
	for (i = 0; i < FM10K_RETA_SIZE; i++) {
		u32 reta, n;

		/* generate a new table if we weren't given one */
		for (j = 0; j < 4; j++) {
			if (indir)
				n = indir[4 * i + j];
			else
				n = ethtool_rxfh_indir_default(4 * i + j,
							       rss_i);

			table[j] = n;
		}

		reta = table[0] |
		       (table[1] << 8) |
		       (table[2] << 16) |
		       (table[3] << 24);

		if (interface->reta[i] == reta)
			continue;

		interface->reta[i] = reta;
		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
	}
}

static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	if (!indir)
		return 0;

	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = interface->reta[i];

		indir[0] = (reta << 24) >> 24;
		indir[1] = (reta << 16) >> 24;
		indir[2] = (reta << 8) >> 24;
		indir[3] = (reta) >> 24;
	}

	return 0;
}

static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;
	u16 rss_i;

	if (!indir)
		return 0;

	/* Verify user input.
	 */
	rss_i = interface->ring_feature[RING_F_RSS].indices;
	for (i = fm10k_get_reta_size(netdev); i--;) {
		if (indir[i] < rss_i)
			continue;
		return -EINVAL;
	}

	fm10k_write_reta(interface, indir);

	return 0;
}

static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
{
	return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}

static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i, err;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	err = fm10k_get_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
		*(__le32 *)key = cpu_to_le32(interface->rssrk[i]);

	return 0;
}

static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i, err;

	/* We do not allow change in unsupported parameters */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	err = fm10k_set_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
		u32 rssrk = le32_to_cpu(*(__le32 *)key);

		if (interface->rssrk[i] == rssrk)
			continue;

		interface->rssrk[i] = rssrk;
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
	}

	return 0;
}

static unsigned int fm10k_max_channels(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int max_combined = interface->hw.mac.max_queues;
	u8 tcs = netdev_get_num_tc(dev);

	/* For QoS report channels per traffic class */
	if (tcs > 1)
		max_combined = BIT(fls(max_combined / tcs) - 1);

	return max_combined;
}

static void fm10k_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = fm10k_max_channels(dev);

	/* report info for other vector */
	ch->max_other = NON_Q_VECTORS;
	ch->other_count = ch->max_other;

	/* record RSS queues */
	ch->combined_count = interface->ring_feature[RING_F_RSS].indices;
}

static int fm10k_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int count = ch->combined_count;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > fm10k_max_channels(dev))
		return -EINVAL;

	interface->ring_feature[RING_F_RSS].limit = count;

	/* use setup TC to update any traffic class queue mapping */
	return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}

static const struct ethtool_ops fm10k_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_strings = fm10k_get_strings,
	.get_sset_count = fm10k_get_sset_count,
	.get_ethtool_stats = fm10k_get_ethtool_stats,
	.get_drvinfo = fm10k_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = fm10k_get_pauseparam,
	.set_pauseparam = fm10k_set_pauseparam,
	.get_msglevel = fm10k_get_msglevel,
	.set_msglevel = fm10k_set_msglevel,
	.get_ringparam = fm10k_get_ringparam,
	.set_ringparam = fm10k_set_ringparam,
	.get_coalesce = fm10k_get_coalesce,
	.set_coalesce = fm10k_set_coalesce,
	.get_rxnfc = fm10k_get_rxnfc,
	.set_rxnfc = fm10k_set_rxnfc,
	.get_regs = fm10k_get_regs,
	.get_regs_len = fm10k_get_regs_len,
	.self_test = fm10k_self_test,
	.get_priv_flags = fm10k_get_priv_flags,
	.set_priv_flags = fm10k_set_priv_flags,
	.get_rxfh_indir_size = fm10k_get_reta_size,
	.get_rxfh_key_size = fm10k_get_rssrk_size,
	.get_rxfh = fm10k_get_rssh,
	.set_rxfh = fm10k_set_rssh,
	.get_channels = fm10k_get_channels,
	.set_channels = fm10k_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
};

void fm10k_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fm10k_ethtool_ops;
}