/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/* ETHTOOL Support for VNIC_VF Device */

#include <linux/pci.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

struct nicvf_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

#define NICVF_HW_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

#define NICVF_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}

static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes),
	NICVF_HW_STAT(rx_ucast_frames),
	NICVF_HW_STAT(rx_bcast_frames),
	NICVF_HW_STAT(rx_mcast_frames),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(rx_drop_red),
	NICVF_HW_STAT(rx_drop_red_bytes),
	NICVF_HW_STAT(rx_drop_overrun),
	NICVF_HW_STAT(rx_drop_overrun_bytes),
	NICVF_HW_STAT(rx_drop_bcast),
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(rx_bgx_truncated_pkts),
	NICVF_HW_STAT(rx_jabber_errs),
	NICVF_HW_STAT(rx_fcs_errs),
	NICVF_HW_STAT(rx_bgx_errs),
	NICVF_HW_STAT(rx_prel2_errs),
	NICVF_HW_STAT(rx_l2_hdr_malformed),
	NICVF_HW_STAT(rx_oversize),
	NICVF_HW_STAT(rx_undersize),
	NICVF_HW_STAT(rx_l2_len_mismatch),
	NICVF_HW_STAT(rx_l2_pclp),
	NICVF_HW_STAT(rx_ip_ver_errs),
	NICVF_HW_STAT(rx_ip_csum_errs),
	NICVF_HW_STAT(rx_ip_hdr_malformed),
	NICVF_HW_STAT(rx_ip_payload_malformed),
	NICVF_HW_STAT(rx_ip_ttl_errs),
	NICVF_HW_STAT(rx_l3_pclp),
	NICVF_HW_STAT(rx_l4_malformed),
	NICVF_HW_STAT(rx_l4_csum_errs),
	NICVF_HW_STAT(rx_udp_len_errs),
	NICVF_HW_STAT(rx_l4_port_errs),
	NICVF_HW_STAT(rx_tcp_flag_errs),
	NICVF_HW_STAT(rx_tcp_offset_errs),
	NICVF_HW_STAT(rx_l4_pclp),
	NICVF_HW_STAT(rx_truncated_pkts),
	NICVF_HW_STAT(tx_bytes_ok),
	NICVF_HW_STAT(tx_ucast_frames_ok),
	NICVF_HW_STAT(tx_bcast_frames_ok),
	NICVF_HW_STAT(tx_mcast_frames_ok),
};

static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(rx_frames_ok),
	NICVF_DRV_STAT(rx_frames_64),
	NICVF_DRV_STAT(rx_frames_127),
	NICVF_DRV_STAT(rx_frames_255),
	NICVF_DRV_STAT(rx_frames_511),
	NICVF_DRV_STAT(rx_frames_1023),
	NICVF_DRV_STAT(rx_frames_1518),
	NICVF_DRV_STAT(rx_frames_jumbo),
	NICVF_DRV_STAT(rx_drops),
	NICVF_DRV_STAT(tx_frames_ok),
	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_drops),
	NICVF_DRV_STAT(txq_stop),
	NICVF_DRV_STAT(txq_wake),
};

static const struct nicvf_stat nicvf_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);

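/*
 * For reference, a table entry such as NICVF_HW_STAT(rx_bytes) above
 * expands to roughly:
 *
 *	{ .name  = "rx_bytes",
 *	  .index = offsetof(struct nicvf_hw_stats, rx_bytes) / sizeof(u64) }
 *
 * i.e. "index" is the u64 slot of the field within the stats structure,
 * so the dump loops below can treat that structure as a flat u64 array.
 */
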
static int nicvf_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->supported = 0;
	cmd->transceiver = XCVR_EXTERNAL;

	if (!nic->link_up) {
		cmd->duplex = DUPLEX_UNKNOWN;
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		return 0;
	}

	if (nic->speed <= 1000) {
		cmd->port = PORT_MII;
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->port = PORT_FIBRE;
		cmd->autoneg = AUTONEG_DISABLE;
	}
	cmd->duplex = nic->duplex;
	ethtool_cmd_speed_set(cmd, nic->speed);

	return 0;
}

static u32 nicvf_get_link(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->link_up;
}

static void nicvf_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *info)
{
	struct nicvf *nic = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}

static u32 nicvf_get_msglevel(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->msg_enable;
}

static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
{
	struct nicvf *nic = netdev_priv(netdev);

	nic->msg_enable = lvl;
}

static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
{
	int stats, qidx;
	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "txq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}

static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stats;
	int sqs;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	nicvf_get_qset_strings(nic, &data, 0);

	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
	}

	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}
}

static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qstats_count;
	int sqs;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = nicvf_n_queue_stats *
		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		struct nicvf *snic;

		snic = nic->snicvf[sqs];
		if (!snic)
			continue;
		qstats_count += nicvf_n_queue_stats *
				(snic->qs->rq_cnt + snic->qs->sq_cnt);
	}

	return nicvf_n_hw_stats + nicvf_n_drv_stats +
	       qstats_count +
	       BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}

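/*
 * Note on ordering: the ethtool core pairs the i-th name emitted by
 * nicvf_get_strings() with the i-th u64 emitted by
 * nicvf_get_ethtool_stats(), and sizes both buffers from
 * nicvf_get_sset_count(). The three callbacks must therefore walk the
 * stats in exactly the same order and agree on the total count.
 */
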
static void nicvf_get_qset_stats(struct nicvf *nic,
				 struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!nic)
		return;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		nicvf_update_rq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
				       [nicvf_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		nicvf_update_sq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
				       [nicvf_queue_stats[stat].index];
	}
}

static void nicvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stat;
	int sqs;

	nicvf_update_stats(nic);

	/* Update LMAC stats */
	nicvf_update_lmac_stats(nic);

	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
		*(data++) = ((u64 *)&nic->hw_stats)
			    [nicvf_hw_stats[stat].index];
	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
		*(data++) = ((u64 *)&nic->drv_stats)
			    [nicvf_drv_stats[stat].index];

	nicvf_get_qset_stats(nic, stats, &data);

	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
	}

	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.rx_stats[stat];
	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.tx_stats[stat];
}

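/*
 * For illustration (hypothetical interface name and counter values), the
 * layout produced above is what "ethtool -S" prints:
 *
 *	# ethtool -S eth0
 *	NIC statistics:
 *	     rx_bytes: 123456
 *	     ...
 *	     rxq0: bytes: 4096
 *	     rxq0: frames: 32
 *	     ...
 *	     bgx_rxstat0: 7
 */
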
static int nicvf_get_regs_len(struct net_device *dev)
{
	return sizeof(u64) * NIC_VF_REG_COUNT;
}

static void nicvf_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *reg)
{
	struct nicvf *nic = netdev_priv(dev);
	u64 *p = (u64 *)reg;
	u64 reg_offset;
	int mbox, key, stat, q;
	int i = 0;

	regs->version = 0;
	memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));

	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
	/* Mailbox registers */
	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));

	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));

	/* Tx/Rx statistics */
	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_TX_STAT_0_4 | (stat << 3));

	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_RX_STAT_0_13 | (stat << 3));

	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);

	/* All completion queues' registers */
	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
	}

	/* All receive queues' registers */
	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS0, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS1, q);
		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}
}

static int nicvf_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
	return 0;
}

static void nicvf_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;

	ring->rx_max_pending = MAX_RCV_BUF_COUNT;
	ring->rx_pending = qs->rbdr_len;
	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
	ring->tx_pending = qs->sq_len;
}

static int nicvf_get_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	info->data = 0;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through: L4 flow types also hash on the IP header */
	case IPV4_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nicvf_get_rxnfc(struct net_device *dev,
			   struct ethtool_rxnfc *info, u32 *rules)
{
	struct nicvf *nic = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nic->rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		return nicvf_get_rss_hash_opts(nic, info);
	default:
		break;
	}
	return ret;
}

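/*
 * For illustration (hypothetical interface name): a command such as
 *
 *	# ethtool -N eth0 rx-flow-hash tcp4 sdfn
 *
 * arrives below as ETHTOOL_SRXFH with flow_type == TCP_V4_FLOW and
 * data == RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3.
 */
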
static int nicvf_set_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, hash cannot be set\n");
		return -EIO;
	}

	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
		    info->flow_type, info->data);

	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
		return -EINVAL;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_UDP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = RSS_HASH_IP;
		break;
	default:
		return -EINVAL;
	}

	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
	return 0;
}

static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct nicvf *nic = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return nicvf_set_rss_hash_opts(nic, info);
	default:
		break;
	}
	return -EOPNOTSUPP;
}

static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
	return RSS_HASH_KEY_SIZE * sizeof(u64);
}

static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	return nic->rss_info.rss_size;
}

static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
			  u8 *hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			indir[idx] = rss->ind_tbl[idx];
	}

	if (hkey)
		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
			  const u8 *hkey, u8 hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] = indir[idx];
	}

	if (hkey) {
		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
		nicvf_set_rss_key(nic);
	}

	nicvf_config_rss(nic);
	return 0;
}

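/*
 * For illustration (hypothetical interface name): "ethtool -X eth0 equal 4"
 * rewrites the indirection table above to spread flows across RX queues
 * 0-3; nicvf_config_rss() then pushes the new table to the hardware.
 */
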
/* Get the number of queues the device supports and the current counts */
static void nicvf_get_channels(struct net_device *dev,
			       struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = nic->max_queues;
	channel->max_tx = nic->max_queues;

	channel->rx_count = nic->rx_queues;
	channel->tx_count = nic->tx_queues;
}

/* Set the number of Tx and Rx queues to be used */
static int nicvf_set_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);
	int cqcount;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > nic->max_queues)
		return -EINVAL;
	if (channel->tx_count > nic->max_queues)
		return -EINVAL;

	if (if_up)
		nicvf_stop(dev);

	cqcount = max(channel->rx_count, channel->tx_count);

	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	nic->rx_queues = channel->rx_count;
	nic->tx_queues = channel->tx_count;
	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
	if (err)
		return err;

	if (if_up)
		nicvf_open(dev);

	netdev_info(dev, "Set num Tx rings to %d and num Rx rings to %d\n",
		    nic->tx_queues, nic->rx_queues);

	return err;
}

static const struct ethtool_ops nicvf_ethtool_ops = {
	.get_settings		= nicvf_get_settings,
	.get_link		= nicvf_get_link,
	.get_drvinfo		= nicvf_get_drvinfo,
	.get_msglevel		= nicvf_get_msglevel,
	.set_msglevel		= nicvf_set_msglevel,
	.get_strings		= nicvf_get_strings,
	.get_sset_count		= nicvf_get_sset_count,
	.get_ethtool_stats	= nicvf_get_ethtool_stats,
	.get_regs_len		= nicvf_get_regs_len,
	.get_regs		= nicvf_get_regs,
	.get_coalesce		= nicvf_get_coalesce,
	.get_ringparam		= nicvf_get_ringparam,
	.get_rxnfc		= nicvf_get_rxnfc,
	.set_rxnfc		= nicvf_set_rxnfc,
	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
	.get_rxfh_indir_size	= nicvf_get_rxfh_indir_size,
	.get_rxfh		= nicvf_get_rxfh,
	.set_rxfh		= nicvf_set_rxfh,
	.get_channels		= nicvf_get_channels,
	.set_channels		= nicvf_set_channels,
	.get_ts_info		= ethtool_op_get_ts_info,
};

void nicvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nicvf_ethtool_ops;
}