// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/vmalloc.h>

#include "fm10k.h"

struct fm10k_stats {
	/* The stat_string is expected to be a format string formatted using
	 * vsnprintf by fm10k_add_stat_strings. Every member of a stats array
	 * should use the same format specifiers as they will be formatted
	 * using the same variadic arguments.
	 */
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define FM10K_STAT_FIELDS(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* netdevice statistics */
#define FM10K_NETDEV_STAT(_net_stat) \
	FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \
			  _net_stat)

static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
	FM10K_NETDEV_STAT(tx_packets),
	FM10K_NETDEV_STAT(tx_bytes),
	FM10K_NETDEV_STAT(tx_errors),
	FM10K_NETDEV_STAT(rx_packets),
	FM10K_NETDEV_STAT(rx_bytes),
	FM10K_NETDEV_STAT(rx_errors),
	FM10K_NETDEV_STAT(rx_dropped),

	/* detailed Rx errors */
	FM10K_NETDEV_STAT(rx_length_errors),
	FM10K_NETDEV_STAT(rx_crc_errors),
	FM10K_NETDEV_STAT(rx_fifo_errors),
};

#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)

/* General interface statistics */
#define FM10K_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
	FM10K_STAT("tx_restart_queue", restart_queue),
	FM10K_STAT("tx_busy", tx_busy),
	FM10K_STAT("tx_csum_errors", tx_csum_errors),
	FM10K_STAT("rx_alloc_failed", alloc_failed),
	FM10K_STAT("rx_csum_errors", rx_csum_errors),

	FM10K_STAT("tx_packets_nic", tx_packets_nic),
	FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
	FM10K_STAT("rx_packets_nic", rx_packets_nic),
	FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
	FM10K_STAT("rx_drops_nic", rx_drops_nic),
	FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
	FM10K_STAT("rx_overrun_vf", rx_overrun_vf),

	FM10K_STAT("swapi_status", hw.swapi.status),
	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),

	FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),

	FM10K_STAT("tx_hang_count", tx_timeout_count),
};

static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
	FM10K_STAT("timeout", stats.timeout.count),
	FM10K_STAT("ur", stats.ur.count),
	FM10K_STAT("ca", stats.ca.count),
	FM10K_STAT("um", stats.um.count),
	FM10K_STAT("xec", stats.xec.count),
	FM10K_STAT("vlan_drop", stats.vlan_drop.count),
	FM10K_STAT("loopback_drop", stats.loopback_drop.count),
	FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};

/* mailbox statistics */
#define FM10K_MBX_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
	FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
	FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped),
	FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
	FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
	FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled),
	FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
	FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
	FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
	FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};
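
/* Per-queue stat names are printf-style templates; fm10k_get_stat_strings()
 * fills in the ring direction ("tx"/"rx") and queue index for each ring.
 */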
/* per-queue ring statistics */
#define FM10K_QUEUE_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
	FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets),
	FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes),
};

#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)

#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
				FM10K_NETDEV_STATS_LEN + \
				FM10K_MBX_STATS_LEN)

static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
	"Mailbox test (on/offline)"
};

#define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN)

enum fm10k_self_test_types {
	FM10K_TEST_MBX,
	FM10K_TEST_MAX = FM10K_TEST_LEN
};

enum {
	FM10K_PRV_FLAG_LEN,
};

static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};

static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
				     const unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		va_start(args, size);
		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
		*p += ETH_GSTRING_LEN;
		va_end(args);
	}
}

#define fm10k_add_stat_strings(p, stats, ...) \
	__fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)

static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int i;

	fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf)
		fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats);

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "tx", i);

		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "rx", i);
	}
}

static void fm10k_get_strings(struct net_device *dev,
			      u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, fm10k_gstrings_test,
		       FM10K_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		fm10k_get_stat_strings(dev, data);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, fm10k_prv_flags,
		       FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
		break;
	}
}

static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int stats_len = FM10K_STATIC_STATS_LEN;

	switch (sset) {
	case ETH_SS_TEST:
		return FM10K_TEST_LEN;
	case ETH_SS_STATS:
		stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;

		if (hw->mac.type != fm10k_mac_vf)
			stats_len += FM10K_PF_STATS_LEN;

		return stats_len;
	case ETH_SS_PRIV_FLAGS:
		return FM10K_PRV_FLAG_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
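
/* Copy each counter from @pointer into the u64 ethtool data array, widening
 * u8/u16/u32 fields as needed. If @pointer is NULL the slots are zero-filled
 * instead, so the reported values always line up with the string table.
 */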
static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
				      const struct fm10k_stats stats[],
				      const unsigned int size)
{
	unsigned int i;

	if (!pointer) {
		/* memory is not zero allocated so we have to clear it */
		for (i = 0; i < size; i++)
			*((*data)++) = 0;
		return;
	}

	for (i = 0; i < size; i++) {
		char *p = (char *)pointer + stats[i].stat_offset;

		switch (stats[i].sizeof_stat) {
		case sizeof(u64):
			*((*data)++) = *(u64 *)p;
			break;
		case sizeof(u32):
			*((*data)++) = *(u32 *)p;
			break;
		case sizeof(u16):
			*((*data)++) = *(u16 *)p;
			break;
		case sizeof(u8):
			*((*data)++) = *(u8 *)p;
			break;
		default:
			WARN_ONCE(1, "unexpected stat size for %s",
				  stats[i].stat_string);
			*((*data)++) = 0;
		}
	}
}

#define fm10k_add_ethtool_stats(data, pointer, stats) \
	__fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))

static void fm10k_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats __always_unused *stats,
				    u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	int i;

	fm10k_update_stats(interface);

	fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats);

	fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats);

	fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
				fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf) {
		fm10k_add_ethtool_stats(&data, interface,
					fm10k_gstrings_pf_stats);
	}

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		struct fm10k_ring *ring;

		ring = interface->tx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);

		ring = interface->rx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);
	}
}
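
/* Register dump helpers: the per-queue and per-VSI reads below must stay in
 * sync with the FM10K_REGS_LEN_* defines, as enforced by the BUG_ON() checks.
 */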
/* If the function below adds more registers, this define needs to be updated */
#define FM10K_REGS_LEN_Q 29

static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0;

	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));

	BUG_ON(idx != FM10K_REGS_LEN_Q);
}

/* If the function above adds more registers, this define needs to be updated */
#define FM10K_REGS_LEN_VSI 43

static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0, j;

	buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
	for (j = 0; j < 10; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
	for (j = 0; j < 32; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));

	BUG_ON(idx != FM10K_REGS_LEN_VSI);
}

static void fm10k_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u32 *buff = p;
	u16 i;

	regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		/* General PF Registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);

		for (i = 0; i < 8; i++) {
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
		}

		for (i = 0; i < 65; i++) {
			fm10k_get_reg_vsi(hw, buff, i);
			buff += FM10K_REGS_LEN_VSI;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);

		for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
			fm10k_get_reg_q(hw, buff, i);
			buff += FM10K_REGS_LEN_Q;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);

		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));

		/* Interrupt Throttling Registers */
		for (i = 0; i < 130; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));

		break;
	case fm10k_mac_vf:
		/* General VF registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME);

		/* Interrupt Throttling Registers */
		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i));

		fm10k_get_reg_vsi(hw, buff, 0);
		buff += FM10K_REGS_LEN_VSI;

		for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) {
			if (i < hw->mac.max_queues)
				fm10k_get_reg_q(hw, buff, i);
			else
				memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q);
			buff += FM10K_REGS_LEN_Q;
		}

		break;
	default:
		return;
	}
}

/* If the function above adds more registers, these defines need to be updated */
#define FM10K_REGS_LEN_PF \
(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))
#define FM10K_REGS_LEN_VF \
(11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q))

static int fm10k_get_regs_len(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		return FM10K_REGS_LEN_PF * sizeof(u32);
	case fm10k_mac_vf:
		return FM10K_REGS_LEN_VF * sizeof(u32);
	default:
		return 0;
	}
}

static void fm10k_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	strncpy(info->driver, fm10k_driver_name,
		sizeof(info->driver) - 1);
	strncpy(info->version, fm10k_driver_version,
		sizeof(info->version) - 1);
	strncpy(info->bus_info, pci_name(interface->pdev),
		sizeof(info->bus_info) - 1);
}

static void fm10k_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* record fixed values for autoneg and tx pause */
	pause->autoneg = 0;
	pause->tx_pause = 1;

	pause->rx_pause = interface->rx_pause ? 1 : 0;
}

static int fm10k_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	if (pause->autoneg || !pause->tx_pause)
		return -EINVAL;

	/* we can only support pause on the PF to avoid head-of-line blocking */
	if (hw->mac.type == fm10k_mac_pf)
		interface->rx_pause = pause->rx_pause ? ~0 : 0;
	else if (pause->rx_pause)
		return -EINVAL;

	if (netif_running(dev))
		fm10k_update_rx_drop_en(interface);

	return 0;
}

static u32 fm10k_get_msglevel(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	return interface->msg_enable;
}

static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	interface->msg_enable = data;
}

static void fm10k_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	ring->rx_max_pending = FM10K_MAX_RXD;
	ring->tx_max_pending = FM10K_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = interface->rx_ring_count;
	ring->tx_pending = interface->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int fm10k_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       FM10K_MIN_TXD, FM10K_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       FM10K_MIN_RXD, FM10K_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == interface->tx_ring_count) &&
	    (new_rx_count == interface->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__FM10K_RESETTING, interface->state))
		usleep_range(1000, 2000);

	if (!netif_running(interface->netdev)) {
		for (i = 0; i < interface->num_tx_queues; i++)
			interface->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < interface->num_rx_queues; i++)
			interface->rx_ring[i]->count = new_rx_count;
		interface->tx_ring_count = new_tx_count;
		interface->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	fm10k_down(interface);

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != interface->tx_ring_count) {
		for (i = 0; i < interface->num_tx_queues; i++) {
			memcpy(&temp_ring[i], interface->tx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_tx_count;
			err = fm10k_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_tx_queues; i++) {
			fm10k_free_tx_resources(interface->tx_ring[i]);

			memcpy(interface->tx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != interface->rx_ring_count) {
		for (i = 0; i < interface->num_rx_queues; i++) {
			memcpy(&temp_ring[i], interface->rx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_rx_count;
			err = fm10k_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_rx_queues; i++) {
			fm10k_free_rx_resources(interface->rx_ring[i]);

			memcpy(interface->rx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->rx_ring_count = new_rx_count;
	}

err_setup:
	fm10k_up(interface);
	vfree(temp_ring);
clear_reset:
	clear_bit(__FM10K_RESETTING, interface->state);
	return err;
}

static int fm10k_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr);
	ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;

	ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr);
	ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;

	return 0;
}

static int fm10k_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 tx_itr, rx_itr;
	int i;

	/* verify limits */
	if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
	    (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
		return -EINVAL;

	/* record settings */
	tx_itr = ec->tx_coalesce_usecs;
	rx_itr = ec->rx_coalesce_usecs;

	/* set initial values for adaptive ITR */
	if (ec->use_adaptive_tx_coalesce)
		tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT;

	if (ec->use_adaptive_rx_coalesce)
		rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* update interface */
	interface->tx_itr = tx_itr;
	interface->rx_itr = rx_itr;

	/* update q_vectors */
	for (i = 0; i < interface->num_q_vectors; i++) {
		struct fm10k_q_vector *qv = interface->q_vector[i];

		qv->tx.itr = tx_itr;
		qv->rx.itr = rx_itr;
	}

	return 0;
}
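
/* Report which packet header fields feed the RSS hash for a given flow type
 * (ETHTOOL_GRXFH). UDP L4 hashing is only reported when the corresponding
 * FM10K_FLAG_RSS_FIELD_IPV*_UDP flag has been enabled via ETHTOOL_SRXFH.
 */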
static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on fm10k */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 __always_unused *rule_locs)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = interface->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fm10k_get_rss_hash_opts(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
				  struct ethtool_rxnfc *nfc)
{
	int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				    interface->flags);
	int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				    interface->flags);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* If something changed we need to update the MRQC register. Note that
	 * test_bit() is guaranteed to return strictly 0 or 1, so testing for
	 * equality is safe.
	 */
	if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				      interface->flags)) ||
	    (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				      interface->flags))) {
		struct fm10k_hw *hw = &interface->hw;
		bool warn = false;
		u32 mrqc;

		/* Perform hash on these packet types */
		mrqc = FM10K_MRQC_IPV4 |
		       FM10K_MRQC_TCP_IPV4 |
		       FM10K_MRQC_IPV6 |
		       FM10K_MRQC_TCP_IPV6;

		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV4;
			warn = true;
		}
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV6;
			warn = true;
		}

		/* If we enable UDP RSS display a warning that this may cause
		 * fragmented UDP packets to arrive out of order.
		 */
		if (warn)
			netif_warn(interface, drv, interface->netdev,
				   "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
	}

	return 0;
}

static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = fm10k_set_rss_hash_opt(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 attr_flag, test_msg[6];
	unsigned long timeout;
	int err = -EINVAL;

	/* For now this is a VF-only feature */
	if (hw->mac.type != fm10k_mac_vf)
		return 0;

	/* loop through both nested and unnested attribute types */
	for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
	     attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
	     attr_flag += attr_flag) {
		/* generate message to be tested */
		fm10k_tlv_msg_test_create(test_msg, attr_flag);

		fm10k_mbx_lock(interface);
		mbx->test_result = FM10K_NOT_IMPLEMENTED;
		err = mbx->ops.enqueue_tx(hw, mbx, test_msg);
		fm10k_mbx_unlock(interface);

		/* wait up to 1 second for response */
		timeout = jiffies + HZ;
		do {
			if (err < 0)
				goto err_out;

			usleep_range(500, 1000);

			fm10k_mbx_lock(interface);
			mbx->ops.process(hw, mbx);
			fm10k_mbx_unlock(interface);

			err = mbx->test_result;
			if (!err)
				break;
		} while (time_is_after_jiffies(timeout));

		/* reporting errors */
		if (err)
			goto err_out;
	}

err_out:
	*data = err < 0 ? (attr_flag) : (err > 0);
	return err;
}

static void fm10k_self_test(struct net_device *dev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);

	if (FM10K_REMOVED(hw->hw_addr)) {
		netif_err(interface, drv, dev,
			  "Interface removed - test blocked\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
}

static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
	return 0;
}

static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
		return -EINVAL;

	return 0;
}

static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}

void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
{
	u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
	struct fm10k_hw *hw = &interface->hw;
	u32 table[4];
	int i, j;

	/* record entries to reta table */
	for (i = 0; i < FM10K_RETA_SIZE; i++) {
		u32 reta, n;

		/* generate a new table if we weren't given one */
		for (j = 0; j < 4; j++) {
			if (indir)
				n = indir[4 * i + j];
			else
				n = ethtool_rxfh_indir_default(4 * i + j,
							       rss_i);

			table[j] = n;
		}

		reta = table[0] |
		       (table[1] << 8) |
		       (table[2] << 16) |
		       (table[3] << 24);

		if (interface->reta[i] == reta)
			continue;

		interface->reta[i] = reta;
		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
	}
}

static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	if (!indir)
		return 0;

	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = interface->reta[i];

		indir[0] = (reta << 24) >> 24;
		indir[1] = (reta << 16) >> 24;
		indir[2] = (reta << 8) >> 24;
		indir[3] = (reta) >> 24;
	}

	return 0;
}
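
/* Each 32-bit RETA register packs four 8-bit queue indices, so the ethtool
 * indirection table is (un)packed four entries at a time. Every entry is
 * validated against the active RSS queue count before the table is written
 * to hardware by fm10k_write_reta().
 */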
static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;
	u16 rss_i;

	if (!indir)
		return 0;

	/* Verify user input. */
	rss_i = interface->ring_feature[RING_F_RSS].indices;
	for (i = fm10k_get_reta_size(netdev); i--;) {
		if (indir[i] < rss_i)
			continue;
		return -EINVAL;
	}

	fm10k_write_reta(interface, indir);

	return 0;
}

static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
{
	return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}

static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i, err;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	err = fm10k_get_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
		*(__le32 *)key = cpu_to_le32(interface->rssrk[i]);

	return 0;
}

static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i, err;

	/* We do not allow change in unsupported parameters */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	err = fm10k_set_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
		u32 rssrk = le32_to_cpu(*(__le32 *)key);

		if (interface->rssrk[i] == rssrk)
			continue;

		interface->rssrk[i] = rssrk;
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
	}

	return 0;
}

static unsigned int fm10k_max_channels(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int max_combined = interface->hw.mac.max_queues;
	u8 tcs = netdev_get_num_tc(dev);

	/* For QoS report channels per traffic class */
	if (tcs > 1)
		max_combined = BIT((fls(max_combined / tcs) - 1));

	return max_combined;
}

static void fm10k_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = fm10k_max_channels(dev);

	/* report info for other vector */
	ch->max_other = NON_Q_VECTORS;
	ch->other_count = ch->max_other;

	/* record RSS queues */
	ch->combined_count = interface->ring_feature[RING_F_RSS].indices;
}

static int fm10k_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int count = ch->combined_count;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > fm10k_max_channels(dev))
		return -EINVAL;

	interface->ring_feature[RING_F_RSS].limit = count;

	/* use setup TC to update any traffic class queue mapping */
	return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}
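
/* ethtool callbacks shared by PF and VF interfaces (several branch on
 * hw->mac.type); installed on the net_device by fm10k_set_ethtool_ops() below.
 */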
static const struct ethtool_ops fm10k_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_strings = fm10k_get_strings,
	.get_sset_count = fm10k_get_sset_count,
	.get_ethtool_stats = fm10k_get_ethtool_stats,
	.get_drvinfo = fm10k_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = fm10k_get_pauseparam,
	.set_pauseparam = fm10k_set_pauseparam,
	.get_msglevel = fm10k_get_msglevel,
	.set_msglevel = fm10k_set_msglevel,
	.get_ringparam = fm10k_get_ringparam,
	.set_ringparam = fm10k_set_ringparam,
	.get_coalesce = fm10k_get_coalesce,
	.set_coalesce = fm10k_set_coalesce,
	.get_rxnfc = fm10k_get_rxnfc,
	.set_rxnfc = fm10k_set_rxnfc,
	.get_regs = fm10k_get_regs,
	.get_regs_len = fm10k_get_regs_len,
	.self_test = fm10k_self_test,
	.get_priv_flags = fm10k_get_priv_flags,
	.set_priv_flags = fm10k_set_priv_flags,
	.get_rxfh_indir_size = fm10k_get_reta_size,
	.get_rxfh_key_size = fm10k_get_rssrk_size,
	.get_rxfh = fm10k_get_rssh,
	.set_rxfh = fm10k_set_rssh,
	.get_channels = fm10k_get_channels,
	.set_channels = fm10k_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
};

void fm10k_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fm10k_ethtool_ops;
}