/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/* ETHTOOL Support for VNIC_VF Device */

#include <linux/pci.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

struct nicvf_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

#define NICVF_HW_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

#define NICVF_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}

static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes),
	NICVF_HW_STAT(rx_frames),
	NICVF_HW_STAT(rx_ucast_frames),
	NICVF_HW_STAT(rx_bcast_frames),
	NICVF_HW_STAT(rx_mcast_frames),
	NICVF_HW_STAT(rx_drops),
	NICVF_HW_STAT(rx_drop_red),
	NICVF_HW_STAT(rx_drop_red_bytes),
	NICVF_HW_STAT(rx_drop_overrun),
	NICVF_HW_STAT(rx_drop_overrun_bytes),
	NICVF_HW_STAT(rx_drop_bcast),
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(tx_bytes),
	NICVF_HW_STAT(tx_frames),
	NICVF_HW_STAT(tx_ucast_frames),
	NICVF_HW_STAT(tx_bcast_frames),
	NICVF_HW_STAT(tx_mcast_frames),
	NICVF_HW_STAT(tx_drops),
};

static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(rx_bgx_truncated_pkts),
	NICVF_DRV_STAT(rx_jabber_errs),
	NICVF_DRV_STAT(rx_fcs_errs),
	NICVF_DRV_STAT(rx_bgx_errs),
	NICVF_DRV_STAT(rx_prel2_errs),
	NICVF_DRV_STAT(rx_l2_hdr_malformed),
	NICVF_DRV_STAT(rx_oversize),
	NICVF_DRV_STAT(rx_undersize),
	NICVF_DRV_STAT(rx_l2_len_mismatch),
	NICVF_DRV_STAT(rx_l2_pclp),
	NICVF_DRV_STAT(rx_ip_ver_errs),
	NICVF_DRV_STAT(rx_ip_csum_errs),
	NICVF_DRV_STAT(rx_ip_hdr_malformed),
	NICVF_DRV_STAT(rx_ip_payload_malformed),
	NICVF_DRV_STAT(rx_ip_ttl_errs),
	NICVF_DRV_STAT(rx_l3_pclp),
	NICVF_DRV_STAT(rx_l4_malformed),
	NICVF_DRV_STAT(rx_l4_csum_errs),
	NICVF_DRV_STAT(rx_udp_len_errs),
	NICVF_DRV_STAT(rx_l4_port_errs),
	NICVF_DRV_STAT(rx_tcp_flag_errs),
	NICVF_DRV_STAT(rx_tcp_offset_errs),
	NICVF_DRV_STAT(rx_l4_pclp),
	NICVF_DRV_STAT(rx_truncated_pkts),

	NICVF_DRV_STAT(tx_desc_fault),
	NICVF_DRV_STAT(tx_hdr_cons_err),
	NICVF_DRV_STAT(tx_subdesc_err),
	NICVF_DRV_STAT(tx_max_size_exceeded),
	NICVF_DRV_STAT(tx_imm_size_oflow),
	NICVF_DRV_STAT(tx_data_seq_err),
	NICVF_DRV_STAT(tx_mem_seq_err),
	NICVF_DRV_STAT(tx_lock_viol),
	NICVF_DRV_STAT(tx_data_fault),
	NICVF_DRV_STAT(tx_tstmp_conflict),
	NICVF_DRV_STAT(tx_tstmp_timeout),
	NICVF_DRV_STAT(tx_mem_fault),
	NICVF_DRV_STAT(tx_csum_overlap),
	NICVF_DRV_STAT(tx_csum_overflow),

	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_timeout),
	NICVF_DRV_STAT(txq_stop),
	NICVF_DRV_STAT(txq_wake),
};

static const struct nicvf_stat nicvf_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
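
/* Each table entry above maps a struct member to an index into the
 * containing struct viewed as an array of u64s (offsetof() / sizeof(u64)),
 * which is how nicvf_get_ethtool_stats() below reads the counters out.
 * This scheme relies on every member of struct nicvf_hw_stats and
 * struct nicvf_drv_stats being a u64.  All of these counters are exposed
 * to userspace via "ethtool -S".
 */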

static int nicvf_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);
	u32 supported, advertising;

	supported = 0;
	advertising = 0;

	if (!nic->link_up) {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
		return 0;
	}

	switch (nic->speed) {
	case SPEED_1000:
		cmd->base.port = PORT_MII | PORT_TP;
		cmd->base.autoneg = AUTONEG_ENABLE;
		supported |= SUPPORTED_MII | SUPPORTED_TP;
		supported |= SUPPORTED_1000baseT_Full |
				SUPPORTED_1000baseT_Half |
				SUPPORTED_100baseT_Full |
				SUPPORTED_100baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_10baseT_Half;
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_1000baseT_Full |
				ADVERTISED_1000baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_10baseT_Half;
		break;
	case SPEED_10000:
		if (nic->mac_type == BGX_MODE_RXAUI) {
			cmd->base.port = PORT_TP;
			supported |= SUPPORTED_TP;
		} else {
			cmd->base.port = PORT_FIBRE;
			supported |= SUPPORTED_FIBRE;
		}
		cmd->base.autoneg = AUTONEG_DISABLE;
		supported |= SUPPORTED_10000baseT_Full;
		break;
	case SPEED_40000:
		cmd->base.port = PORT_FIBRE;
		cmd->base.autoneg = AUTONEG_DISABLE;
		supported |= SUPPORTED_FIBRE;
		supported |= SUPPORTED_40000baseCR4_Full;
		break;
	}
	cmd->base.duplex = nic->duplex;
	cmd->base.speed = nic->speed;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static u32 nicvf_get_link(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->link_up;
}

static void nicvf_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *info)
{
	struct nicvf *nic = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}

static u32 nicvf_get_msglevel(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->msg_enable;
}

static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
{
	struct nicvf *nic = netdev_priv(netdev);

	nic->msg_enable = lvl;
}

static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
{
	int stats, qidx;
	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "txq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}
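
/* Note: the ethtool core requires get_strings(), get_sset_count() and
 * get_ethtool_stats() to agree on both the number and the order of
 * entries.  The HW stats, driver stats, per-queue stats (primary qset
 * first, then each secondary qset) and BGX stats are therefore always
 * emitted in that fixed order by all three callbacks below.
 */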

static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stats;
	int sqs;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	nicvf_get_qset_strings(nic, &data, 0);

	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
	}

	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}
}

static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qstats_count;
	int sqs;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = nicvf_n_queue_stats *
		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		struct nicvf *snic;

		snic = nic->snicvf[sqs];
		if (!snic)
			continue;
		qstats_count += nicvf_n_queue_stats *
				(snic->qs->rq_cnt + snic->qs->sq_cnt);
	}

	return nicvf_n_hw_stats + nicvf_n_drv_stats +
	       qstats_count +
	       BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}

static void nicvf_get_qset_stats(struct nicvf *nic,
				 struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!nic)
		return;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		nicvf_update_rq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		nicvf_update_sq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}
}

static void nicvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	u64 tmp_stats;
	int stat, sqs, cpu;

	nicvf_update_stats(nic);

	/* Update LMAC stats */
	nicvf_update_lmac_stats(nic);

	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
		*(data++) = ((u64 *)&nic->hw_stats)
				[nicvf_hw_stats[stat].index];
	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
		/* Driver stats are per-CPU u64 counters; sum them into a
		 * 64-bit accumulator so large values do not truncate.
		 */
		tmp_stats = 0;
		for_each_possible_cpu(cpu)
			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
				      [nicvf_drv_stats[stat].index];
		*(data++) = tmp_stats;
	}

	nicvf_get_qset_stats(nic, stats, &data);

	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
	}

	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.rx_stats[stat];
	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.tx_stats[stat];
}

static int nicvf_get_regs_len(struct net_device *dev)
{
	return sizeof(u64) * NIC_VF_REG_COUNT;
}
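
/* Register dump for "ethtool -d".  The number of p[i++] stores below must
 * not exceed NIC_VF_REG_COUNT, since that is the u64-entry buffer size
 * advertised by nicvf_get_regs_len() above.
 */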

static void nicvf_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *reg)
{
	struct nicvf *nic = netdev_priv(dev);
	u64 *p = (u64 *)reg;
	u64 reg_offset;
	int mbox, key, stat, q;
	int i = 0;

	regs->version = 0;
	/* Zero the whole dump buffer (NIC_VF_REG_COUNT u64 entries),
	 * not just NIC_VF_REG_COUNT bytes.
	 */
	memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));

	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
	/* Mailbox registers */
	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));

	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));

	/* Tx/Rx statistics */
	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_TX_STAT_0_4 | (stat << 3));

	/* Loop on 'stat', not 'i': reusing 'i' as the loop counter would
	 * rewind the output index and read a stale register offset.
	 */
	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_RX_STAT_0_13 | (stat << 3));

	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);

	/* All completion queue's registers */
	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
	}

	/* All receive queue's registers */
	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	/* All send queue's registers */
	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
		/* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
		 * produces bus errors when read
		 */
		p[i++] = 0;
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	/* All receive buffer descriptor ring registers */
	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS0, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS1, q);
		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}
}
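
/* Interrupt coalescing ("ethtool -c"): only the current CQ timer value is
 * reported.  The driver registers no set_coalesce hook in the ethtool_ops
 * table below, so the setting is read-only from userspace's point of view.
 */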

static int nicvf_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
	return 0;
}

static void nicvf_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;

	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
	ring->rx_pending = qs->cq_len;
	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
	ring->tx_pending = qs->sq_len;
}

static int nicvf_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	u32 rx_count, tx_count;

	/* Due to HW errata this is not supported on T88 pass 1.x silicon */
	if (pass1_silicon(nic->pdev))
		return -EINVAL;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	tx_count = clamp_t(u32, ring->tx_pending,
			   MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
	rx_count = clamp_t(u32, ring->rx_pending,
			   MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);

	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
		return 0;

	/* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
	qs->sq_len = rounddown_pow_of_two(tx_count);
	qs->cq_len = rounddown_pow_of_two(rx_count);

	if (netif_running(netdev)) {
		nicvf_stop(netdev);
		nicvf_open(netdev);
	}

	return 0;
}

static int nicvf_get_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	info->data = 0;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case IPV4_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nicvf_get_rxnfc(struct net_device *dev,
			   struct ethtool_rxnfc *info, u32 *rules)
{
	struct nicvf *nic = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nic->rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		return nicvf_get_rss_hash_opts(nic, info);
	default:
		break;
	}
	return ret;
}
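
/* Handler for "ethtool -N <dev> rx-flow-hash ..." (ETHTOOL_SRXFH).
 * Hashing on both the IP source and destination address is mandatory;
 * only the L4 port contribution (RXH_L4_B_0_1 | RXH_L4_B_2_3) can be
 * toggled, by setting or clearing the corresponding RSS_HASH_* bit in
 * the NIC_VNIC_RSS_CFG register.
 */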

static int nicvf_set_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, hash cannot be set\n");
		return -EIO;
	}

	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
		    info->flow_type, info->data);

	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
		return -EINVAL;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_UDP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = RSS_HASH_IP;
		break;
	default:
		return -EINVAL;
	}

	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
	return 0;
}

static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct nicvf *nic = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return nicvf_set_rss_hash_opts(nic, info);
	default:
		break;
	}
	return -EOPNOTSUPP;
}

static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
	return RSS_HASH_KEY_SIZE * sizeof(u64);
}

static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	return nic->rss_info.rss_size;
}

static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
			  u8 *hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			indir[idx] = rss->ind_tbl[idx];
	}

	if (hkey)
		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
			  const u8 *hkey, const u8 hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] = indir[idx];
	}

	if (hkey) {
		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
		nicvf_set_rss_key(nic);
	}

	nicvf_config_rss(nic);
	return 0;
}
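
/* Queue (channel) configuration, e.g. "ethtool -L <dev> rx 8 tx 8".
 * A VF owns one queue set (QS); when more queues are requested than one
 * QS provides (MAX_CMP_QUEUES_PER_QS), the extra queues are carried by
 * secondary qsets, counted in nic->sqs_count and reflected in the
 * snicvf[] array used by the stats callbacks above.
 */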

/* Get the number of queues the device supports and the current queue count */
static void nicvf_get_channels(struct net_device *dev,
			       struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = nic->max_queues;
	channel->max_tx = nic->max_queues;

	channel->rx_count = nic->rx_queues;
	channel->tx_count = nic->tx_queues;
}

/* Set the number of Tx and Rx queues to be used */
static int nicvf_set_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);
	int cqcount;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > nic->max_queues)
		return -EINVAL;
	if (channel->tx_count > nic->max_queues)
		return -EINVAL;

	if (if_up)
		nicvf_stop(dev);

	cqcount = max(channel->rx_count, channel->tx_count);

	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	nic->rx_queues = channel->rx_count;
	nic->tx_queues = channel->tx_count;
	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
	if (err)
		return err;

	if (if_up)
		nicvf_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    nic->tx_queues, nic->rx_queues);

	return err;
}

static void nicvf_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct nicvf *nic = netdev_priv(dev);
	union nic_mbx mbx = {};

	/* Supported only for 10G/40G interfaces */
	if ((nic->mac_type == BGX_MODE_SGMII) ||
	    (nic->mac_type == BGX_MODE_QSGMII) ||
	    (nic->mac_type == BGX_MODE_RGMII))
		return;

	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
	mbx.pfc.get = 1;
	if (!nicvf_send_msg_to_pf(nic, &mbx)) {
		pause->autoneg = nic->pfc.autoneg;
		pause->rx_pause = nic->pfc.fc_rx;
		pause->tx_pause = nic->pfc.fc_tx;
	}
}

static int nicvf_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct nicvf *nic = netdev_priv(dev);
	union nic_mbx mbx = {};

	/* Supported only for 10G/40G interfaces */
	if ((nic->mac_type == BGX_MODE_SGMII) ||
	    (nic->mac_type == BGX_MODE_QSGMII) ||
	    (nic->mac_type == BGX_MODE_RGMII))
		return -EOPNOTSUPP;

	if (pause->autoneg)
		return -EOPNOTSUPP;

	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
	mbx.pfc.get = 0;
	mbx.pfc.fc_rx = pause->rx_pause;
	mbx.pfc.fc_tx = pause->tx_pause;
	if (nicvf_send_msg_to_pf(nic, &mbx))
		return -EAGAIN;

	nic->pfc.fc_rx = pause->rx_pause;
	nic->pfc.fc_tx = pause->tx_pause;

	return 0;
}

static const struct ethtool_ops nicvf_ethtool_ops = {
	.get_link		= nicvf_get_link,
	.get_drvinfo		= nicvf_get_drvinfo,
	.get_msglevel		= nicvf_get_msglevel,
	.set_msglevel		= nicvf_set_msglevel,
	.get_strings		= nicvf_get_strings,
	.get_sset_count		= nicvf_get_sset_count,
	.get_ethtool_stats	= nicvf_get_ethtool_stats,
	.get_regs_len		= nicvf_get_regs_len,
	.get_regs		= nicvf_get_regs,
	.get_coalesce		= nicvf_get_coalesce,
	.get_ringparam		= nicvf_get_ringparam,
	.set_ringparam		= nicvf_set_ringparam,
	.get_rxnfc		= nicvf_get_rxnfc,
	.set_rxnfc		= nicvf_set_rxnfc,
	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
	.get_rxfh_indir_size	= nicvf_get_rxfh_indir_size,
	.get_rxfh		= nicvf_get_rxfh,
	.set_rxfh		= nicvf_set_rxfh,
	.get_channels		= nicvf_get_channels,
	.set_channels		= nicvf_set_channels,
	.get_pauseparam		= nicvf_get_pauseparam,
	.set_pauseparam		= nicvf_set_pauseparam,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= nicvf_get_link_ksettings,
};

void nicvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nicvf_ethtool_ops;
}