1 /* 2 * Linux network driver for QLogic BR-series Converged Network Adapter. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License (GPL) Version 2 as 6 * published by the Free Software Foundation 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 /* 14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 15 * Copyright (c) 2014-2015 QLogic Corporation 16 * All rights reserved 17 * www.qlogic.com 18 */ 19 20 #include "cna.h" 21 22 #include <linux/netdevice.h> 23 #include <linux/skbuff.h> 24 #include <linux/ethtool.h> 25 #include <linux/rtnetlink.h> 26 27 #include "bna.h" 28 29 #include "bnad.h" 30 31 #define BNAD_NUM_TXF_COUNTERS 12 32 #define BNAD_NUM_RXF_COUNTERS 10 33 #define BNAD_NUM_CQ_COUNTERS (3 + 5) 34 #define BNAD_NUM_RXQ_COUNTERS 7 35 #define BNAD_NUM_TXQ_COUNTERS 5 36 37 static const char *bnad_net_stats_strings[] = { 38 "rx_packets", 39 "tx_packets", 40 "rx_bytes", 41 "tx_bytes", 42 "rx_errors", 43 "tx_errors", 44 "rx_dropped", 45 "tx_dropped", 46 "multicast", 47 "collisions", 48 "rx_length_errors", 49 "rx_crc_errors", 50 "rx_frame_errors", 51 "tx_fifo_errors", 52 53 "netif_queue_stop", 54 "netif_queue_wakeup", 55 "netif_queue_stopped", 56 "tso4", 57 "tso6", 58 "tso_err", 59 "tcpcsum_offload", 60 "udpcsum_offload", 61 "csum_help", 62 "tx_skb_too_short", 63 "tx_skb_stopping", 64 "tx_skb_max_vectors", 65 "tx_skb_mss_too_long", 66 "tx_skb_tso_too_short", 67 "tx_skb_tso_prepare", 68 "tx_skb_non_tso_too_long", 69 "tx_skb_tcp_hdr", 70 "tx_skb_udp_hdr", 71 "tx_skb_csum_err", 72 "tx_skb_headlen_too_long", 73 "tx_skb_headlen_zero", 74 "tx_skb_frag_zero", 75 "tx_skb_len_mismatch", 76 "tx_skb_map_failed", 77 "hw_stats_updates", 78 "netif_rx_dropped", 79 80 "link_toggle", 81 "cee_toggle", 82 83 "rxp_info_alloc_failed", 84 "mbox_intr_disabled", 85 "mbox_intr_enabled", 86 "tx_unmap_q_alloc_failed", 87 "rx_unmap_q_alloc_failed", 88 "rxbuf_alloc_failed", 89 "rxbuf_map_failed", 90 91 "mac_stats_clr_cnt", 92 "mac_frame_64", 93 "mac_frame_65_127", 94 "mac_frame_128_255", 95 "mac_frame_256_511", 96 "mac_frame_512_1023", 97 "mac_frame_1024_1518", 98 "mac_frame_1518_1522", 99 "mac_rx_bytes", 100 "mac_rx_packets", 101 "mac_rx_fcs_error", 102 "mac_rx_multicast", 103 "mac_rx_broadcast", 104 "mac_rx_control_frames", 105 "mac_rx_pause", 106 "mac_rx_unknown_opcode", 107 "mac_rx_alignment_error", 108 "mac_rx_frame_length_error", 109 "mac_rx_code_error", 110 "mac_rx_carrier_sense_error", 111 "mac_rx_undersize", 112 "mac_rx_oversize", 113 "mac_rx_fragments", 114 "mac_rx_jabber", 115 "mac_rx_drop", 116 117 "mac_tx_bytes", 118 "mac_tx_packets", 119 "mac_tx_multicast", 120 "mac_tx_broadcast", 121 "mac_tx_pause", 122 "mac_tx_deferral", 123 "mac_tx_excessive_deferral", 124 "mac_tx_single_collision", 125 "mac_tx_muliple_collision", 126 "mac_tx_late_collision", 127 "mac_tx_excessive_collision", 128 "mac_tx_total_collision", 129 "mac_tx_pause_honored", 130 "mac_tx_drop", 131 "mac_tx_jabber", 132 "mac_tx_fcs_error", 133 "mac_tx_control_frame", 134 "mac_tx_oversize", 135 "mac_tx_undersize", 136 "mac_tx_fragments", 137 138 "bpc_tx_pause_0", 139 "bpc_tx_pause_1", 140 "bpc_tx_pause_2", 141 "bpc_tx_pause_3", 142 "bpc_tx_pause_4", 143 "bpc_tx_pause_5", 144 "bpc_tx_pause_6", 145 "bpc_tx_pause_7", 146 "bpc_tx_zero_pause_0", 147 "bpc_tx_zero_pause_1", 148 "bpc_tx_zero_pause_2", 149 "bpc_tx_zero_pause_3", 150 "bpc_tx_zero_pause_4", 151 "bpc_tx_zero_pause_5", 152 "bpc_tx_zero_pause_6", 153 "bpc_tx_zero_pause_7", 154 "bpc_tx_first_pause_0", 155 "bpc_tx_first_pause_1", 156 "bpc_tx_first_pause_2", 157 "bpc_tx_first_pause_3", 158 "bpc_tx_first_pause_4", 159 "bpc_tx_first_pause_5", 160 "bpc_tx_first_pause_6", 161 "bpc_tx_first_pause_7", 162 163 "bpc_rx_pause_0", 164 "bpc_rx_pause_1", 165 "bpc_rx_pause_2", 166 "bpc_rx_pause_3", 167 "bpc_rx_pause_4", 168 "bpc_rx_pause_5", 169 "bpc_rx_pause_6", 170 "bpc_rx_pause_7", 171 "bpc_rx_zero_pause_0", 172 "bpc_rx_zero_pause_1", 173 "bpc_rx_zero_pause_2", 174 "bpc_rx_zero_pause_3", 175 "bpc_rx_zero_pause_4", 176 "bpc_rx_zero_pause_5", 177 "bpc_rx_zero_pause_6", 178 "bpc_rx_zero_pause_7", 179 "bpc_rx_first_pause_0", 180 "bpc_rx_first_pause_1", 181 "bpc_rx_first_pause_2", 182 "bpc_rx_first_pause_3", 183 "bpc_rx_first_pause_4", 184 "bpc_rx_first_pause_5", 185 "bpc_rx_first_pause_6", 186 "bpc_rx_first_pause_7", 187 188 "rad_rx_frames", 189 "rad_rx_octets", 190 "rad_rx_vlan_frames", 191 "rad_rx_ucast", 192 "rad_rx_ucast_octets", 193 "rad_rx_ucast_vlan", 194 "rad_rx_mcast", 195 "rad_rx_mcast_octets", 196 "rad_rx_mcast_vlan", 197 "rad_rx_bcast", 198 "rad_rx_bcast_octets", 199 "rad_rx_bcast_vlan", 200 "rad_rx_drops", 201 202 "rlb_rad_rx_frames", 203 "rlb_rad_rx_octets", 204 "rlb_rad_rx_vlan_frames", 205 "rlb_rad_rx_ucast", 206 "rlb_rad_rx_ucast_octets", 207 "rlb_rad_rx_ucast_vlan", 208 "rlb_rad_rx_mcast", 209 "rlb_rad_rx_mcast_octets", 210 "rlb_rad_rx_mcast_vlan", 211 "rlb_rad_rx_bcast", 212 "rlb_rad_rx_bcast_octets", 213 "rlb_rad_rx_bcast_vlan", 214 "rlb_rad_rx_drops", 215 216 "fc_rx_ucast_octets", 217 "fc_rx_ucast", 218 "fc_rx_ucast_vlan", 219 "fc_rx_mcast_octets", 220 "fc_rx_mcast", 221 "fc_rx_mcast_vlan", 222 "fc_rx_bcast_octets", 223 "fc_rx_bcast", 224 "fc_rx_bcast_vlan", 225 226 "fc_tx_ucast_octets", 227 "fc_tx_ucast", 228 "fc_tx_ucast_vlan", 229 "fc_tx_mcast_octets", 230 "fc_tx_mcast", 231 "fc_tx_mcast_vlan", 232 "fc_tx_bcast_octets", 233 "fc_tx_bcast", 234 "fc_tx_bcast_vlan", 235 "fc_tx_parity_errors", 236 "fc_tx_timeout", 237 "fc_tx_fid_parity_errors", 238 }; 239 240 #define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings) 241 242 static int 243 bnad_get_link_ksettings(struct net_device *netdev, 244 struct ethtool_link_ksettings *cmd) 245 { 246 u32 supported, advertising; 247 248 supported = SUPPORTED_10000baseT_Full; 249 advertising = ADVERTISED_10000baseT_Full; 250 cmd->base.autoneg = AUTONEG_DISABLE; 251 supported |= SUPPORTED_FIBRE; 252 advertising |= ADVERTISED_FIBRE; 253 cmd->base.port = PORT_FIBRE; 254 cmd->base.phy_address = 0; 255 256 if (netif_carrier_ok(netdev)) { 257 cmd->base.speed = SPEED_10000; 258 cmd->base.duplex = DUPLEX_FULL; 259 } else { 260 cmd->base.speed = SPEED_UNKNOWN; 261 cmd->base.duplex = DUPLEX_UNKNOWN; 262 } 263 264 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 265 supported); 266 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 267 advertising); 268 269 return 0; 270 } 271 272 static int 273 bnad_set_link_ksettings(struct net_device *netdev, 274 const struct ethtool_link_ksettings *cmd) 275 { 276 /* 10G full duplex setting supported only */ 277 if (cmd->base.autoneg == AUTONEG_ENABLE) 278 return -EOPNOTSUPP; 279 280 if ((cmd->base.speed == SPEED_10000) && 281 (cmd->base.duplex == DUPLEX_FULL)) 282 return 0; 283 284 return -EOPNOTSUPP; 285 } 286 287 static void 288 bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 289 { 290 struct bnad *bnad = netdev_priv(netdev); 291 struct bfa_ioc_attr *ioc_attr; 292 unsigned long flags; 293 294 strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver)); 295 strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version)); 296 297 ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); 298 if (ioc_attr) { 299 spin_lock_irqsave(&bnad->bna_lock, flags); 300 bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); 301 spin_unlock_irqrestore(&bnad->bna_lock, flags); 302 303 strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, 304 sizeof(drvinfo->fw_version)); 305 kfree(ioc_attr); 306 } 307 308 strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev), 309 sizeof(drvinfo->bus_info)); 310 } 311 312 static void 313 bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) 314 { 315 wolinfo->supported = 0; 316 wolinfo->wolopts = 0; 317 } 318 319 static int 320 bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 321 { 322 struct bnad *bnad = netdev_priv(netdev); 323 unsigned long flags; 324 325 /* Lock rqd. to access bnad->bna_lock */ 326 spin_lock_irqsave(&bnad->bna_lock, flags); 327 coalesce->use_adaptive_rx_coalesce = 328 (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; 329 spin_unlock_irqrestore(&bnad->bna_lock, flags); 330 331 coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * 332 BFI_COALESCING_TIMER_UNIT; 333 coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * 334 BFI_COALESCING_TIMER_UNIT; 335 coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT; 336 337 return 0; 338 } 339 340 static int 341 bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 342 { 343 struct bnad *bnad = netdev_priv(netdev); 344 unsigned long flags; 345 int to_del = 0; 346 347 if (coalesce->rx_coalesce_usecs == 0 || 348 coalesce->rx_coalesce_usecs > 349 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) 350 return -EINVAL; 351 352 if (coalesce->tx_coalesce_usecs == 0 || 353 coalesce->tx_coalesce_usecs > 354 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) 355 return -EINVAL; 356 357 mutex_lock(&bnad->conf_mutex); 358 /* 359 * Do not need to store rx_coalesce_usecs here 360 * Every time DIM is disabled, we can get it from the 361 * stack. 362 */ 363 spin_lock_irqsave(&bnad->bna_lock, flags); 364 if (coalesce->use_adaptive_rx_coalesce) { 365 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) { 366 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; 367 bnad_dim_timer_start(bnad); 368 } 369 } else { 370 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { 371 bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; 372 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && 373 test_bit(BNAD_RF_DIM_TIMER_RUNNING, 374 &bnad->run_flags)) { 375 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, 376 &bnad->run_flags); 377 to_del = 1; 378 } 379 spin_unlock_irqrestore(&bnad->bna_lock, flags); 380 if (to_del) 381 del_timer_sync(&bnad->dim_timer); 382 spin_lock_irqsave(&bnad->bna_lock, flags); 383 bnad_rx_coalescing_timeo_set(bnad); 384 } 385 } 386 if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs / 387 BFI_COALESCING_TIMER_UNIT) { 388 bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs / 389 BFI_COALESCING_TIMER_UNIT; 390 bnad_tx_coalescing_timeo_set(bnad); 391 } 392 393 if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs / 394 BFI_COALESCING_TIMER_UNIT) { 395 bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs / 396 BFI_COALESCING_TIMER_UNIT; 397 398 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) 399 bnad_rx_coalescing_timeo_set(bnad); 400 401 } 402 403 /* Add Tx Inter-pkt DMA count? */ 404 405 spin_unlock_irqrestore(&bnad->bna_lock, flags); 406 407 mutex_unlock(&bnad->conf_mutex); 408 return 0; 409 } 410 411 static void 412 bnad_get_ringparam(struct net_device *netdev, 413 struct ethtool_ringparam *ringparam) 414 { 415 struct bnad *bnad = netdev_priv(netdev); 416 417 ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH; 418 ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH; 419 420 ringparam->rx_pending = bnad->rxq_depth; 421 ringparam->tx_pending = bnad->txq_depth; 422 } 423 424 static int 425 bnad_set_ringparam(struct net_device *netdev, 426 struct ethtool_ringparam *ringparam) 427 { 428 int i, current_err, err = 0; 429 struct bnad *bnad = netdev_priv(netdev); 430 unsigned long flags; 431 432 mutex_lock(&bnad->conf_mutex); 433 if (ringparam->rx_pending == bnad->rxq_depth && 434 ringparam->tx_pending == bnad->txq_depth) { 435 mutex_unlock(&bnad->conf_mutex); 436 return 0; 437 } 438 439 if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH || 440 ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH || 441 !is_power_of_2(ringparam->rx_pending)) { 442 mutex_unlock(&bnad->conf_mutex); 443 return -EINVAL; 444 } 445 if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH || 446 ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH || 447 !is_power_of_2(ringparam->tx_pending)) { 448 mutex_unlock(&bnad->conf_mutex); 449 return -EINVAL; 450 } 451 452 if (ringparam->rx_pending != bnad->rxq_depth) { 453 bnad->rxq_depth = ringparam->rx_pending; 454 if (!netif_running(netdev)) { 455 mutex_unlock(&bnad->conf_mutex); 456 return 0; 457 } 458 459 for (i = 0; i < bnad->num_rx; i++) { 460 if (!bnad->rx_info[i].rx) 461 continue; 462 bnad_destroy_rx(bnad, i); 463 current_err = bnad_setup_rx(bnad, i); 464 if (current_err && !err) 465 err = current_err; 466 } 467 468 if (!err && bnad->rx_info[0].rx) { 469 /* restore rx configuration */ 470 bnad_restore_vlans(bnad, 0); 471 bnad_enable_default_bcast(bnad); 472 spin_lock_irqsave(&bnad->bna_lock, flags); 473 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); 474 spin_unlock_irqrestore(&bnad->bna_lock, flags); 475 bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | 476 BNAD_CF_PROMISC); 477 bnad_set_rx_mode(netdev); 478 } 479 } 480 if (ringparam->tx_pending != bnad->txq_depth) { 481 bnad->txq_depth = ringparam->tx_pending; 482 if (!netif_running(netdev)) { 483 mutex_unlock(&bnad->conf_mutex); 484 return 0; 485 } 486 487 for (i = 0; i < bnad->num_tx; i++) { 488 if (!bnad->tx_info[i].tx) 489 continue; 490 bnad_destroy_tx(bnad, i); 491 current_err = bnad_setup_tx(bnad, i); 492 if (current_err && !err) 493 err = current_err; 494 } 495 } 496 497 mutex_unlock(&bnad->conf_mutex); 498 return err; 499 } 500 501 static void 502 bnad_get_pauseparam(struct net_device *netdev, 503 struct ethtool_pauseparam *pauseparam) 504 { 505 struct bnad *bnad = netdev_priv(netdev); 506 507 pauseparam->autoneg = 0; 508 pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause; 509 pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause; 510 } 511 512 static int 513 bnad_set_pauseparam(struct net_device *netdev, 514 struct ethtool_pauseparam *pauseparam) 515 { 516 struct bnad *bnad = netdev_priv(netdev); 517 struct bna_pause_config pause_config; 518 unsigned long flags; 519 520 if (pauseparam->autoneg == AUTONEG_ENABLE) 521 return -EINVAL; 522 523 mutex_lock(&bnad->conf_mutex); 524 if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause || 525 pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) { 526 pause_config.rx_pause = pauseparam->rx_pause; 527 pause_config.tx_pause = pauseparam->tx_pause; 528 spin_lock_irqsave(&bnad->bna_lock, flags); 529 bna_enet_pause_config(&bnad->bna.enet, &pause_config); 530 spin_unlock_irqrestore(&bnad->bna_lock, flags); 531 } 532 mutex_unlock(&bnad->conf_mutex); 533 return 0; 534 } 535 536 static void 537 bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) 538 { 539 struct bnad *bnad = netdev_priv(netdev); 540 int i, j, q_num; 541 u32 bmap; 542 543 mutex_lock(&bnad->conf_mutex); 544 545 switch (stringset) { 546 case ETH_SS_STATS: 547 for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { 548 BUG_ON(!(strlen(bnad_net_stats_strings[i]) < 549 ETH_GSTRING_LEN)); 550 strncpy(string, bnad_net_stats_strings[i], 551 ETH_GSTRING_LEN); 552 string += ETH_GSTRING_LEN; 553 } 554 bmap = bna_tx_rid_mask(&bnad->bna); 555 for (i = 0; bmap; i++) { 556 if (bmap & 1) { 557 sprintf(string, "txf%d_ucast_octets", i); 558 string += ETH_GSTRING_LEN; 559 sprintf(string, "txf%d_ucast", i); 560 string += ETH_GSTRING_LEN; 561 sprintf(string, "txf%d_ucast_vlan", i); 562 string += ETH_GSTRING_LEN; 563 sprintf(string, "txf%d_mcast_octets", i); 564 string += ETH_GSTRING_LEN; 565 sprintf(string, "txf%d_mcast", i); 566 string += ETH_GSTRING_LEN; 567 sprintf(string, "txf%d_mcast_vlan", i); 568 string += ETH_GSTRING_LEN; 569 sprintf(string, "txf%d_bcast_octets", i); 570 string += ETH_GSTRING_LEN; 571 sprintf(string, "txf%d_bcast", i); 572 string += ETH_GSTRING_LEN; 573 sprintf(string, "txf%d_bcast_vlan", i); 574 string += ETH_GSTRING_LEN; 575 sprintf(string, "txf%d_errors", i); 576 string += ETH_GSTRING_LEN; 577 sprintf(string, "txf%d_filter_vlan", i); 578 string += ETH_GSTRING_LEN; 579 sprintf(string, "txf%d_filter_mac_sa", i); 580 string += ETH_GSTRING_LEN; 581 } 582 bmap >>= 1; 583 } 584 585 bmap = bna_rx_rid_mask(&bnad->bna); 586 for (i = 0; bmap; i++) { 587 if (bmap & 1) { 588 sprintf(string, "rxf%d_ucast_octets", i); 589 string += ETH_GSTRING_LEN; 590 sprintf(string, "rxf%d_ucast", i); 591 string += ETH_GSTRING_LEN; 592 sprintf(string, "rxf%d_ucast_vlan", i); 593 string += ETH_GSTRING_LEN; 594 sprintf(string, "rxf%d_mcast_octets", i); 595 string += ETH_GSTRING_LEN; 596 sprintf(string, "rxf%d_mcast", i); 597 string += ETH_GSTRING_LEN; 598 sprintf(string, "rxf%d_mcast_vlan", i); 599 string += ETH_GSTRING_LEN; 600 sprintf(string, "rxf%d_bcast_octets", i); 601 string += ETH_GSTRING_LEN; 602 sprintf(string, "rxf%d_bcast", i); 603 string += ETH_GSTRING_LEN; 604 sprintf(string, "rxf%d_bcast_vlan", i); 605 string += ETH_GSTRING_LEN; 606 sprintf(string, "rxf%d_frame_drops", i); 607 string += ETH_GSTRING_LEN; 608 } 609 bmap >>= 1; 610 } 611 612 q_num = 0; 613 for (i = 0; i < bnad->num_rx; i++) { 614 if (!bnad->rx_info[i].rx) 615 continue; 616 for (j = 0; j < bnad->num_rxp_per_rx; j++) { 617 sprintf(string, "cq%d_producer_index", q_num); 618 string += ETH_GSTRING_LEN; 619 sprintf(string, "cq%d_consumer_index", q_num); 620 string += ETH_GSTRING_LEN; 621 sprintf(string, "cq%d_hw_producer_index", 622 q_num); 623 string += ETH_GSTRING_LEN; 624 sprintf(string, "cq%d_intr", q_num); 625 string += ETH_GSTRING_LEN; 626 sprintf(string, "cq%d_poll", q_num); 627 string += ETH_GSTRING_LEN; 628 sprintf(string, "cq%d_schedule", q_num); 629 string += ETH_GSTRING_LEN; 630 sprintf(string, "cq%d_keep_poll", q_num); 631 string += ETH_GSTRING_LEN; 632 sprintf(string, "cq%d_complete", q_num); 633 string += ETH_GSTRING_LEN; 634 q_num++; 635 } 636 } 637 638 q_num = 0; 639 for (i = 0; i < bnad->num_rx; i++) { 640 if (!bnad->rx_info[i].rx) 641 continue; 642 for (j = 0; j < bnad->num_rxp_per_rx; j++) { 643 sprintf(string, "rxq%d_packets", q_num); 644 string += ETH_GSTRING_LEN; 645 sprintf(string, "rxq%d_bytes", q_num); 646 string += ETH_GSTRING_LEN; 647 sprintf(string, "rxq%d_packets_with_error", 648 q_num); 649 string += ETH_GSTRING_LEN; 650 sprintf(string, "rxq%d_allocbuf_failed", q_num); 651 string += ETH_GSTRING_LEN; 652 sprintf(string, "rxq%d_mapbuf_failed", q_num); 653 string += ETH_GSTRING_LEN; 654 sprintf(string, "rxq%d_producer_index", q_num); 655 string += ETH_GSTRING_LEN; 656 sprintf(string, "rxq%d_consumer_index", q_num); 657 string += ETH_GSTRING_LEN; 658 q_num++; 659 if (bnad->rx_info[i].rx_ctrl[j].ccb && 660 bnad->rx_info[i].rx_ctrl[j].ccb-> 661 rcb[1] && 662 bnad->rx_info[i].rx_ctrl[j].ccb-> 663 rcb[1]->rxq) { 664 sprintf(string, "rxq%d_packets", q_num); 665 string += ETH_GSTRING_LEN; 666 sprintf(string, "rxq%d_bytes", q_num); 667 string += ETH_GSTRING_LEN; 668 sprintf(string, 669 "rxq%d_packets_with_error", q_num); 670 string += ETH_GSTRING_LEN; 671 sprintf(string, "rxq%d_allocbuf_failed", 672 q_num); 673 string += ETH_GSTRING_LEN; 674 sprintf(string, "rxq%d_mapbuf_failed", 675 q_num); 676 string += ETH_GSTRING_LEN; 677 sprintf(string, "rxq%d_producer_index", 678 q_num); 679 string += ETH_GSTRING_LEN; 680 sprintf(string, "rxq%d_consumer_index", 681 q_num); 682 string += ETH_GSTRING_LEN; 683 q_num++; 684 } 685 } 686 } 687 688 q_num = 0; 689 for (i = 0; i < bnad->num_tx; i++) { 690 if (!bnad->tx_info[i].tx) 691 continue; 692 for (j = 0; j < bnad->num_txq_per_tx; j++) { 693 sprintf(string, "txq%d_packets", q_num); 694 string += ETH_GSTRING_LEN; 695 sprintf(string, "txq%d_bytes", q_num); 696 string += ETH_GSTRING_LEN; 697 sprintf(string, "txq%d_producer_index", q_num); 698 string += ETH_GSTRING_LEN; 699 sprintf(string, "txq%d_consumer_index", q_num); 700 string += ETH_GSTRING_LEN; 701 sprintf(string, "txq%d_hw_consumer_index", 702 q_num); 703 string += ETH_GSTRING_LEN; 704 q_num++; 705 } 706 } 707 708 break; 709 710 default: 711 break; 712 } 713 714 mutex_unlock(&bnad->conf_mutex); 715 } 716 717 static int 718 bnad_get_stats_count_locked(struct net_device *netdev) 719 { 720 struct bnad *bnad = netdev_priv(netdev); 721 int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0; 722 u32 bmap; 723 724 bmap = bna_tx_rid_mask(&bnad->bna); 725 for (i = 0; bmap; i++) { 726 if (bmap & 1) 727 txf_active_num++; 728 bmap >>= 1; 729 } 730 bmap = bna_rx_rid_mask(&bnad->bna); 731 for (i = 0; bmap; i++) { 732 if (bmap & 1) 733 rxf_active_num++; 734 bmap >>= 1; 735 } 736 count = BNAD_ETHTOOL_STATS_NUM + 737 txf_active_num * BNAD_NUM_TXF_COUNTERS + 738 rxf_active_num * BNAD_NUM_RXF_COUNTERS; 739 740 for (i = 0; i < bnad->num_rx; i++) { 741 if (!bnad->rx_info[i].rx) 742 continue; 743 count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; 744 count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; 745 for (j = 0; j < bnad->num_rxp_per_rx; j++) 746 if (bnad->rx_info[i].rx_ctrl[j].ccb && 747 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && 748 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) 749 count += BNAD_NUM_RXQ_COUNTERS; 750 } 751 752 for (i = 0; i < bnad->num_tx; i++) { 753 if (!bnad->tx_info[i].tx) 754 continue; 755 count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; 756 } 757 return count; 758 } 759 760 static int 761 bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) 762 { 763 int i, j; 764 struct bna_rcb *rcb = NULL; 765 struct bna_tcb *tcb = NULL; 766 767 for (i = 0; i < bnad->num_rx; i++) { 768 if (!bnad->rx_info[i].rx) 769 continue; 770 for (j = 0; j < bnad->num_rxp_per_rx; j++) 771 if (bnad->rx_info[i].rx_ctrl[j].ccb && 772 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && 773 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { 774 buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. 775 ccb->producer_index; 776 buf[bi++] = 0; /* ccb->consumer_index */ 777 buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. 778 ccb->hw_producer_index); 779 780 buf[bi++] = bnad->rx_info[i]. 781 rx_ctrl[j].rx_intr_ctr; 782 buf[bi++] = bnad->rx_info[i]. 783 rx_ctrl[j].rx_poll_ctr; 784 buf[bi++] = bnad->rx_info[i]. 785 rx_ctrl[j].rx_schedule; 786 buf[bi++] = bnad->rx_info[i]. 787 rx_ctrl[j].rx_keep_poll; 788 buf[bi++] = bnad->rx_info[i]. 789 rx_ctrl[j].rx_complete; 790 } 791 } 792 for (i = 0; i < bnad->num_rx; i++) { 793 if (!bnad->rx_info[i].rx) 794 continue; 795 for (j = 0; j < bnad->num_rxp_per_rx; j++) 796 if (bnad->rx_info[i].rx_ctrl[j].ccb) { 797 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && 798 bnad->rx_info[i].rx_ctrl[j].ccb-> 799 rcb[0]->rxq) { 800 rcb = bnad->rx_info[i].rx_ctrl[j]. 801 ccb->rcb[0]; 802 buf[bi++] = rcb->rxq->rx_packets; 803 buf[bi++] = rcb->rxq->rx_bytes; 804 buf[bi++] = rcb->rxq-> 805 rx_packets_with_error; 806 buf[bi++] = rcb->rxq-> 807 rxbuf_alloc_failed; 808 buf[bi++] = rcb->rxq->rxbuf_map_failed; 809 buf[bi++] = rcb->producer_index; 810 buf[bi++] = rcb->consumer_index; 811 } 812 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && 813 bnad->rx_info[i].rx_ctrl[j].ccb-> 814 rcb[1]->rxq) { 815 rcb = bnad->rx_info[i].rx_ctrl[j]. 816 ccb->rcb[1]; 817 buf[bi++] = rcb->rxq->rx_packets; 818 buf[bi++] = rcb->rxq->rx_bytes; 819 buf[bi++] = rcb->rxq-> 820 rx_packets_with_error; 821 buf[bi++] = rcb->rxq-> 822 rxbuf_alloc_failed; 823 buf[bi++] = rcb->rxq->rxbuf_map_failed; 824 buf[bi++] = rcb->producer_index; 825 buf[bi++] = rcb->consumer_index; 826 } 827 } 828 } 829 830 for (i = 0; i < bnad->num_tx; i++) { 831 if (!bnad->tx_info[i].tx) 832 continue; 833 for (j = 0; j < bnad->num_txq_per_tx; j++) 834 if (bnad->tx_info[i].tcb[j] && 835 bnad->tx_info[i].tcb[j]->txq) { 836 tcb = bnad->tx_info[i].tcb[j]; 837 buf[bi++] = tcb->txq->tx_packets; 838 buf[bi++] = tcb->txq->tx_bytes; 839 buf[bi++] = tcb->producer_index; 840 buf[bi++] = tcb->consumer_index; 841 buf[bi++] = *(tcb->hw_consumer_index); 842 } 843 } 844 845 return bi; 846 } 847 848 static void 849 bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, 850 u64 *buf) 851 { 852 struct bnad *bnad = netdev_priv(netdev); 853 int i, j, bi = 0; 854 unsigned long flags; 855 struct rtnl_link_stats64 net_stats64; 856 u64 *stats64; 857 u32 bmap; 858 859 mutex_lock(&bnad->conf_mutex); 860 if (bnad_get_stats_count_locked(netdev) != stats->n_stats) { 861 mutex_unlock(&bnad->conf_mutex); 862 return; 863 } 864 865 /* 866 * Used bna_lock to sync reads from bna_stats, which is written 867 * under the same lock 868 */ 869 spin_lock_irqsave(&bnad->bna_lock, flags); 870 871 memset(&net_stats64, 0, sizeof(net_stats64)); 872 bnad_netdev_qstats_fill(bnad, &net_stats64); 873 bnad_netdev_hwstats_fill(bnad, &net_stats64); 874 875 buf[bi++] = net_stats64.rx_packets; 876 buf[bi++] = net_stats64.tx_packets; 877 buf[bi++] = net_stats64.rx_bytes; 878 buf[bi++] = net_stats64.tx_bytes; 879 buf[bi++] = net_stats64.rx_errors; 880 buf[bi++] = net_stats64.tx_errors; 881 buf[bi++] = net_stats64.rx_dropped; 882 buf[bi++] = net_stats64.tx_dropped; 883 buf[bi++] = net_stats64.multicast; 884 buf[bi++] = net_stats64.collisions; 885 buf[bi++] = net_stats64.rx_length_errors; 886 buf[bi++] = net_stats64.rx_crc_errors; 887 buf[bi++] = net_stats64.rx_frame_errors; 888 buf[bi++] = net_stats64.tx_fifo_errors; 889 890 /* Get netif_queue_stopped from stack */ 891 bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); 892 893 /* Fill driver stats into ethtool buffers */ 894 stats64 = (u64 *)&bnad->stats.drv_stats; 895 for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) 896 buf[bi++] = stats64[i]; 897 898 /* Fill hardware stats excluding the rxf/txf into ethtool bufs */ 899 stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats; 900 for (i = 0; 901 i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / 902 sizeof(u64); 903 i++) 904 buf[bi++] = stats64[i]; 905 906 /* Fill txf stats into ethtool buffers */ 907 bmap = bna_tx_rid_mask(&bnad->bna); 908 for (i = 0; bmap; i++) { 909 if (bmap & 1) { 910 stats64 = (u64 *)&bnad->stats.bna_stats-> 911 hw_stats.txf_stats[i]; 912 for (j = 0; j < sizeof(struct bfi_enet_stats_txf) / 913 sizeof(u64); j++) 914 buf[bi++] = stats64[j]; 915 } 916 bmap >>= 1; 917 } 918 919 /* Fill rxf stats into ethtool buffers */ 920 bmap = bna_rx_rid_mask(&bnad->bna); 921 for (i = 0; bmap; i++) { 922 if (bmap & 1) { 923 stats64 = (u64 *)&bnad->stats.bna_stats-> 924 hw_stats.rxf_stats[i]; 925 for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) / 926 sizeof(u64); j++) 927 buf[bi++] = stats64[j]; 928 } 929 bmap >>= 1; 930 } 931 932 /* Fill per Q stats into ethtool buffers */ 933 bi = bnad_per_q_stats_fill(bnad, buf, bi); 934 935 spin_unlock_irqrestore(&bnad->bna_lock, flags); 936 937 mutex_unlock(&bnad->conf_mutex); 938 } 939 940 static int 941 bnad_get_sset_count(struct net_device *netdev, int sset) 942 { 943 switch (sset) { 944 case ETH_SS_STATS: 945 return bnad_get_stats_count_locked(netdev); 946 default: 947 return -EOPNOTSUPP; 948 } 949 } 950 951 static u32 952 bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset, 953 u32 *base_offset) 954 { 955 struct bfa_flash_attr *flash_attr; 956 struct bnad_iocmd_comp fcomp; 957 u32 i, flash_part = 0, ret; 958 unsigned long flags = 0; 959 960 flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL); 961 if (!flash_attr) 962 return 0; 963 964 fcomp.bnad = bnad; 965 fcomp.comp_status = 0; 966 967 init_completion(&fcomp.comp); 968 spin_lock_irqsave(&bnad->bna_lock, flags); 969 ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr, 970 bnad_cb_completion, &fcomp); 971 if (ret != BFA_STATUS_OK) { 972 spin_unlock_irqrestore(&bnad->bna_lock, flags); 973 kfree(flash_attr); 974 return 0; 975 } 976 spin_unlock_irqrestore(&bnad->bna_lock, flags); 977 wait_for_completion(&fcomp.comp); 978 ret = fcomp.comp_status; 979 980 /* Check for the flash type & base offset value */ 981 if (ret == BFA_STATUS_OK) { 982 for (i = 0; i < flash_attr->npart; i++) { 983 if (offset >= flash_attr->part[i].part_off && 984 offset < (flash_attr->part[i].part_off + 985 flash_attr->part[i].part_size)) { 986 flash_part = flash_attr->part[i].part_type; 987 *base_offset = flash_attr->part[i].part_off; 988 break; 989 } 990 } 991 } 992 kfree(flash_attr); 993 return flash_part; 994 } 995 996 static int 997 bnad_get_eeprom_len(struct net_device *netdev) 998 { 999 return BFA_TOTAL_FLASH_SIZE; 1000 } 1001 1002 static int 1003 bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 1004 u8 *bytes) 1005 { 1006 struct bnad *bnad = netdev_priv(netdev); 1007 struct bnad_iocmd_comp fcomp; 1008 u32 flash_part = 0, base_offset = 0; 1009 unsigned long flags = 0; 1010 int ret = 0; 1011 1012 /* Fill the magic value */ 1013 eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16); 1014 1015 /* Query the flash partition based on the offset */ 1016 flash_part = bnad_get_flash_partition_by_offset(bnad, 1017 eeprom->offset, &base_offset); 1018 if (flash_part == 0) 1019 return -EFAULT; 1020 1021 fcomp.bnad = bnad; 1022 fcomp.comp_status = 0; 1023 1024 init_completion(&fcomp.comp); 1025 spin_lock_irqsave(&bnad->bna_lock, flags); 1026 ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part, 1027 bnad->id, bytes, eeprom->len, 1028 eeprom->offset - base_offset, 1029 bnad_cb_completion, &fcomp); 1030 if (ret != BFA_STATUS_OK) { 1031 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1032 goto done; 1033 } 1034 1035 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1036 wait_for_completion(&fcomp.comp); 1037 ret = fcomp.comp_status; 1038 done: 1039 return ret; 1040 } 1041 1042 static int 1043 bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 1044 u8 *bytes) 1045 { 1046 struct bnad *bnad = netdev_priv(netdev); 1047 struct bnad_iocmd_comp fcomp; 1048 u32 flash_part = 0, base_offset = 0; 1049 unsigned long flags = 0; 1050 int ret = 0; 1051 1052 /* Check if the flash update request is valid */ 1053 if (eeprom->magic != (bnad->pcidev->vendor | 1054 (bnad->pcidev->device << 16))) 1055 return -EINVAL; 1056 1057 /* Query the flash partition based on the offset */ 1058 flash_part = bnad_get_flash_partition_by_offset(bnad, 1059 eeprom->offset, &base_offset); 1060 if (flash_part == 0) 1061 return -EFAULT; 1062 1063 fcomp.bnad = bnad; 1064 fcomp.comp_status = 0; 1065 1066 init_completion(&fcomp.comp); 1067 spin_lock_irqsave(&bnad->bna_lock, flags); 1068 ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part, 1069 bnad->id, bytes, eeprom->len, 1070 eeprom->offset - base_offset, 1071 bnad_cb_completion, &fcomp); 1072 if (ret != BFA_STATUS_OK) { 1073 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1074 goto done; 1075 } 1076 1077 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1078 wait_for_completion(&fcomp.comp); 1079 ret = fcomp.comp_status; 1080 done: 1081 return ret; 1082 } 1083 1084 static int 1085 bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash) 1086 { 1087 struct bnad *bnad = netdev_priv(netdev); 1088 struct bnad_iocmd_comp fcomp; 1089 const struct firmware *fw; 1090 int ret = 0; 1091 1092 ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev); 1093 if (ret) { 1094 netdev_err(netdev, "can't load firmware %s\n", eflash->data); 1095 goto out; 1096 } 1097 1098 fcomp.bnad = bnad; 1099 fcomp.comp_status = 0; 1100 1101 init_completion(&fcomp.comp); 1102 spin_lock_irq(&bnad->bna_lock); 1103 ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG, 1104 bnad->id, (u8 *)fw->data, fw->size, 0, 1105 bnad_cb_completion, &fcomp); 1106 if (ret != BFA_STATUS_OK) { 1107 netdev_warn(netdev, "flash update failed with err=%d\n", ret); 1108 ret = -EIO; 1109 spin_unlock_irq(&bnad->bna_lock); 1110 goto out; 1111 } 1112 1113 spin_unlock_irq(&bnad->bna_lock); 1114 wait_for_completion(&fcomp.comp); 1115 if (fcomp.comp_status != BFA_STATUS_OK) { 1116 ret = -EIO; 1117 netdev_warn(netdev, 1118 "firmware image update failed with err=%d\n", 1119 fcomp.comp_status); 1120 } 1121 out: 1122 release_firmware(fw); 1123 return ret; 1124 } 1125 1126 static const struct ethtool_ops bnad_ethtool_ops = { 1127 .get_drvinfo = bnad_get_drvinfo, 1128 .get_wol = bnad_get_wol, 1129 .get_link = ethtool_op_get_link, 1130 .get_coalesce = bnad_get_coalesce, 1131 .set_coalesce = bnad_set_coalesce, 1132 .get_ringparam = bnad_get_ringparam, 1133 .set_ringparam = bnad_set_ringparam, 1134 .get_pauseparam = bnad_get_pauseparam, 1135 .set_pauseparam = bnad_set_pauseparam, 1136 .get_strings = bnad_get_strings, 1137 .get_ethtool_stats = bnad_get_ethtool_stats, 1138 .get_sset_count = bnad_get_sset_count, 1139 .get_eeprom_len = bnad_get_eeprom_len, 1140 .get_eeprom = bnad_get_eeprom, 1141 .set_eeprom = bnad_set_eeprom, 1142 .flash_device = bnad_flash_device, 1143 .get_ts_info = ethtool_op_get_ts_info, 1144 .get_link_ksettings = bnad_get_link_ksettings, 1145 .set_link_ksettings = bnad_set_link_ksettings, 1146 }; 1147 1148 void 1149 bnad_set_ethtool_ops(struct net_device *netdev) 1150 { 1151 netdev->ethtool_ops = &bnad_ethtool_ops; 1152 } 1153