/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

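/* Coalescing helpers: bp->rx_coal and bp->tx_coal store the parameters
 * in hardware units.  bufs_per_record is (from the conversions below)
 * the number of hardware completion buffers per ethtool "frame", so
 * frame counts are multiplied or divided by it on the way in and out.
 */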
static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (netif_running(dev)) {
		if (update_stats) {
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

#define BNXT_NUM_STATS	22

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_PCIE_STATS_ENTRY(counter)	\
	{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
};

static struct {
	u64			counter;
	char			string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
	{0, "rx_total_discard_pkts"},
	{0, "tx_total_discard_pkts"},
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_pcie_stats_arr[] = {
	BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
	BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
	BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
	BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
	BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
	BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
};

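/* Element counts for the string tables above; bnxt_get_num_stats() and
 * bnxt_get_ethtool_stats() size the ethtool buffer from these, so the
 * tables and counts must stay in sync.
 */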
#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS	ARRAY_SIZE(bnxt_pcie_stats_arr)

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;

	num_stats += BNXT_NUM_SW_FUNC_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		num_stats += bp->fw_rx_stats_ext_size +
			     bp->fw_tx_stats_ext_size;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	if (bp->flags & BNXT_FLAG_PCIE_STATS)
		num_stats += BNXT_NUM_PCIE_STATS;

	return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;

	if (!bp->bnapi) {
		j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
		goto skip_ring_stats;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
		bnxt_sw_func_stats[i].counter = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
		int k;

		for (k = 0; k < stat_fields; j++, k++)
			buf[j] = le64_to_cpu(hw_stats[k]);
		buf[j++] = cpr->rx_l4_csum_errors;
		buf[j++] = cpr->missed_irqs;

		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
			le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
			le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
		buf[j] = bnxt_sw_func_stats[i].counter;

skip_ring_stats:
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
			buf[j] = le64_to_cpu(*(port_stats +
					       bnxt_port_stats_arr[i].offset));
		}
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		__le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
		__le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;

		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
			buf[j] = le64_to_cpu(*(rx_port_stats_ext +
					    bnxt_port_stats_ext_arr[i].offset));
		}
		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
			buf[j] = le64_to_cpu(*(tx_port_stats_ext +
					bnxt_tx_port_stats_ext_arr[i].offset));
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
			}
		}
	}
	if (bp->flags & BNXT_FLAG_PCIE_STATS) {
		__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;

		for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
			buf[j] = le64_to_cpu(*(pcie_stats +
					       bnxt_pcie_stats_arr[i].offset));
		}
	}
}

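/* bnxt_get_strings() must emit names in exactly the order that
 * bnxt_get_ethtool_stats() writes values: per-ring hardware stats,
 * software function stats, then the optional port, extended port,
 * per-priority and PCIe blocks.
 */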
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i;

	switch (stringset) {
	/* The number of strings must match BNXT_NUM_STATS defined above. */
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			sprintf(buf, "[%d]: rx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_events", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_aborts", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: missed_irqs", i);
			buf += ETH_GSTRING_LEN;
		}
		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
			strcpy(buf, bnxt_sw_func_stats[i].string);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		if (bp->flags & BNXT_FLAG_PCIE_STATS) {
			for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
				strcpy(buf, bnxt_pcie_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

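/* Ring size handlers.  The maximums are hardware descriptor limits;
 * tx_pending must exceed MAX_SKB_FRAGS, presumably so a maximally
 * fragmented skb always fits in the TX ring.
 */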
static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

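/* Channel handlers.  A "combined" channel shares one completion ring
 * between RX and TX (BNXT_FLAG_SHARED_RINGS); otherwise RX and TX ring
 * counts are reported and configured independently.
 */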
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * to re-enable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp);
	}

	return rc;
}

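/* ntuple filter read-out for ethtool -n.  Filters live in an RCU
 * protected hash table; sw_id is the stable handle reported back as
 * the ethtool rule location.
 */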
#ifdef CONFIG_RFS_ACCEL
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
#endif

static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

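/* Report the RSS hash fields for one flow type.  TCP/UDP flow types
 * add the L4 port bits when 4-tuple hashing is enabled and then pick
 * up the IP src/dst bits from the generic IPv4/IPv6 hash setting.
 */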
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

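/* Top-level ethtool -n/-N dispatch.  Note that bnxt_srxfh() above
 * applies a hash-config change with a quick close/open cycle when the
 * interface is running.
 */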
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
#ifdef CONFIG_RFS_ACCEL
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;
#endif

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		rc = bnxt_srxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic;
	int i = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[0];
	if (indir && vnic->rss_table) {
		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
			indir[i] = le16_to_cpu(vnic->rss_table[i]);
	}

	if (key && vnic->rss_hash_key)
		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

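/* Conversions between the firmware link-speed bitmask and the legacy
 * ethtool ADVERTISED_* mask.  Pause bits follow the usual ethtool
 * convention: both directions -> Pause, TX only -> Asym_Pause, RX only
 * -> Pause | Asym_Pause.
 */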
u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
	u32 speed_mask = 0;

	/* TODO: support 25GB, 40GB, 50GB with different cable type */
	/* set the advertised speeds */
	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		speed_mask |= ADVERTISED_100baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		speed_mask |= ADVERTISED_1000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		speed_mask |= ADVERTISED_2500baseX_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		speed_mask |= ADVERTISED_10000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		speed_mask |= ADVERTISED_40000baseCR4_Full;

	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
		speed_mask |= ADVERTISED_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_TX)
		speed_mask |= ADVERTISED_Asym_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_RX)
		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return speed_mask;
}

#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}

#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}

static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->advertising;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->auto_pause_setting;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
}

static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->lp_auto_link_speeds;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->lp_pause;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
				lp_advertising);
}

static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->support_speeds;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);

	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
					     Asym_Pause);

	if (link_info->support_auto_speeds)
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     Autoneg);
}

u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
		return SPEED_100000;
	default:
		return SPEED_UNKNOWN;
	}
}

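/* Link settings are read under bp->link_lock so that speed, duplex and
 * the advertising masks all come from one consistent snapshot of
 * bp->link_info.
 */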
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		if (!netif_carrier_ok(dev))
			base->duplex = DUPLEX_UNKNOWN;
		else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		else
			base->duplex = DUPLEX_HALF;
	} else {
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_spds = link_info->support_speeds;
	u32 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
		break;
	case SPEED_100000:
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
		break;
	default:
		netdev_err(dev, "unsupported speed!\n");
		break;
	}
	return fw_speed;
}

u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
	u16 fw_speed_mask = 0;

	/* only support autoneg at speed 100, 1000, and 10000 */
	if (advertising & (ADVERTISED_100baseT_Full |
			   ADVERTISED_100baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
	}
	if (advertising & (ADVERTISED_1000baseT_Full |
			   ADVERTISED_1000baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
	}
	if (advertising & ADVERTISED_10000baseT_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (advertising & ADVERTISED_40000baseCR4_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

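/* Setting link parameters: with autoneg the requested modes are mapped
 * back to a firmware speed mask (an empty mask means advertise all
 * supported speeds); forced mode requires a single full-duplex speed
 * that the PHY supports.
 */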
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u16 fw_advertising = 0;
	u32 speed;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
					advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		if (!fw_advertising)
			link_info->advertising = link_info->support_auto_speeds;
		else
			link_info->advertising = fw_advertising;
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u16 fw_speed;
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		fw_speed = bnxt_get_fw_speed(dev, speed);
		if (!fw_speed) {
			rc = -EINVAL;
			goto set_setting_exit;
		}
		link_info->req_link_speed = fw_speed;
		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
		link_info->autoneg = 0;
		link_info->advertising = 0;
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}

static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
			return -EINVAL;

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		if (bp->hwrm_spec_code >= 0x10201)
			link_info->req_flow_ctrl =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);
	return rc;
}

static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return bp->link_info.link_up;
}

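/* NVRAM flashing helpers.  Writes go through HWRM_NVM_WRITE with the
 * payload staged in a DMA-coherent bounce buffer; firmware access
 * denials are mapped to -EACCES, other failures to -EIO.
 */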
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length);

static int bnxt_flash_nvram(struct net_device *dev,
			    u16 dir_type,
			    u16 dir_ordinal,
			    u16 dir_ext,
			    u16 dir_attr,
			    const u8 *data,
			    size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_write_input req = {0};
	dma_addr_t dma_handle;
	u8 *kmem;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);

	req.dir_type = cpu_to_le16(dir_type);
	req.dir_ordinal = cpu_to_le16(dir_ordinal);
	req.dir_ext = cpu_to_le16(dir_ext);
	req.dir_attr = cpu_to_le16(dir_attr);
	req.dir_data_length = cpu_to_le32(data_len);

	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
				  GFP_KERNEL);
	if (!kmem) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)data_len);
		return -ENOMEM;
	}
	memcpy(kmem, data, data_len);
	req.host_src_addr = cpu_to_le64(dma_handle);

	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);

	if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
		netdev_info(dev,
			    "PF does not have admin privileges to flash the device\n");
		rc = -EACCES;
	} else if (rc) {
		rc = -EIO;
	}
	return rc;
}

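/* Ask firmware to reset the processor that owns the given directory
 * entry type (boot/ChiMP, APE management, Kong network control, Bono
 * RoCE).  Boot code and APE are marked to self-reset on the next PCIe
 * reset rather than immediately.
 */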
static int bnxt_firmware_reset(struct net_device *dev,
			       u16 dir_type)
{
	struct hwrm_fw_reset_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	case BNXT_FW_RESET_CHIP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
		break;
	case BNXT_FW_RESET_AP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
		break;
	default:
		return -EINVAL;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
		netdev_info(dev,
			    "PF does not have admin privileges to reset the device\n");
		rc = -EACCES;
	} else if (rc) {
		rc = -EIO;
	}
	return rc;
}

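/* Firmware images carry a bnxt_fw_header; the signature, code type,
 * device family and trailing CRC32 are all validated before flashing,
 * and a successful flash is followed by a processor reset.
 */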
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
	    sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);

	return rc;
}

static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

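/* Package update flow: find the BNX_DIR_TYPE_UPDATE staging area in
 * NVRAM, copy the file there with HWRM_NVM_MODIFY, then ask firmware
 * to install it with HWRM_NVM_INSTALL_UPDATE, retrying once with
 * defragmentation allowed if the first install reports FRAG_ERR.
 */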
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

static int bnxt_flash_package_from_file(struct net_device *dev,
					char *filename, u32 install_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_nvm_install_update_input install = {0};
	const struct firmware *fw;
	int rc, hwrm_err = 0;
	u32 item_len;
	u16 index;

	bnxt_hwrm_fw_set_time(bp);

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, &item_len, NULL) != 0) {
		netdev_err(dev, "PKG update area not created in nvram\n");
		return -ENOBUFS;
	}

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	if (fw->size > item_len) {
		netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
			   (unsigned long)fw->size);
		rc = -EFBIG;
	} else {
		dma_addr_t dma_handle;
		u8 *kmem;
		struct hwrm_nvm_modify_input modify = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);

		modify.dir_idx = cpu_to_le16(index);
		modify.len = cpu_to_le32(fw->size);

		kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
					  &dma_handle, GFP_KERNEL);
		if (!kmem) {
			netdev_err(dev,
				   "dma_alloc_coherent failure, length = %u\n",
				   (unsigned int)fw->size);
			rc = -ENOMEM;
		} else {
			memcpy(kmem, fw->data, fw->size);
			modify.host_src_addr = cpu_to_le64(dma_handle);

			hwrm_err = hwrm_send_message(bp, &modify,
						     sizeof(modify),
						     FLASH_PACKAGE_TIMEOUT);
			dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
					  dma_handle);
		}
	}
	release_firmware(fw);
	if (rc || hwrm_err)
		goto err_exit;

	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	install.install_type = cpu_to_le32(install_type);

	mutex_lock(&bp->hwrm_cmd_lock);
	hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
				      INSTALL_PACKAGE_TIMEOUT);
	if (hwrm_err)
		goto flash_pkg_exit;

	if (resp->error_code) {
		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;

		if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
			install.flags |= cpu_to_le16(
			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
			hwrm_err = _hwrm_send_message(bp, &install,
						      sizeof(install),
						      INSTALL_PACKAGE_TIMEOUT);
			if (hwrm_err)
				goto flash_pkg_exit;
		}
	}

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = -ENOPKG;
	}
flash_pkg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
	if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
		netdev_info(dev,
			    "PF does not have admin privileges to flash the device\n");
		rc = -EACCES;
	} else if (hwrm_err) {
		rc = -EOPNOTSUPP;
	}
	return rc;
}

static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
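/* bnxt_flash_device() above is the .flash_device hook behind
 * "ethtool -f <dev> <file> [region]".  With ethtool's default region
 * (0, i.e. ETHTOOL_FLASH_ALL_REGIONS) or any region above 0xffff, the file
 * is treated as a whole package and installed via NVM_INSTALL_UPDATE; a
 * small non-zero region is interpreted as an NVRAM directory type and
 * flashed as a single item.
 */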
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}

static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned int)buflen);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
	return rc;
}
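/* With the ethtool CLI, "ethtool -e <dev> offset 0 length <n>" returns the
 * directory listing built above (one byte each for the entry count and
 * entry size, then the raw directory entries); non-zero offsets are decoded
 * by bnxt_get_eeprom() further down as ((index + 1) << 24 | offset) into a
 * single NVRAM item.
 */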
static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			       u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input req = {0};

	if (!length)
		return -EINVAL;

	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned int)length);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	req.dir_idx = cpu_to_le16(index);
	req.offset = cpu_to_le32(offset);
	req.len = cpu_to_le32(length);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, length);
	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
	return rc;
}

static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_find_dir_entry_input req = {0};
	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
	req.enables = 0;
	req.dir_idx = 0;
	req.dir_type = cpu_to_le16(type);
	req.dir_ordinal = cpu_to_le16(ordinal);
	req.dir_ext = cpu_to_le16(ext);
	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char *retval = NULL;
	char *p;
	char *value;
	int field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = (char *)data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}
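/* bnxt_parse_pkglog() above assumes the PKG_LOG item is plain text: one
 * record per line with tab-separated fields, and the value returned is the
 * requested field of the last record.  bnxt_get_pkgver() below uses it to
 * extract BNX_PKG_LOG_FIELD_IDX_PKG_VERSION for the fw_ver_str suffix.
 */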
static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int len;

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, NULL, &pkglen) != 0)
		return;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return;
	}

	if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
		goto err;

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
			 "/pkg %s", pkgver);
	}
err:
	kfree(pkgbuf);
}

static int bnxt_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	u32 index;
	u32 offset;

	if (eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(dev, eeprom->len, data);

	index = eeprom->offset >> 24;
	offset = eeprom->offset & 0xffffff;

	if (index == 0) {
		netdev_err(dev, "unsupported index value: %d\n", index);
		return -EINVAL;
	}

	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}

static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_erase_dir_entry_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
	req.dir_idx = cpu_to_le16(index);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
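/* bnxt_set_eeprom() below multiplexes "ethtool -E" writes through the magic
 * and offset words: a magic with 0xffff in the upper 16 bits selects a
 * directory operation, where bits 15:8 are the op (only 0x0e, erase, is
 * implemented) and bits 7:0 the 1-based directory index, with offset
 * required to be the bitwise NOT of magic as an interlock.  Any other magic
 * encodes (dir_type << 16 | dir_ext) and offset encodes
 * (ordinal << 16 | attr) for an ordinary item write.
 */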
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
				eeprom->len);
}

static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising =
		 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled) {
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			return -EINVAL;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		return -EINVAL;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

	return rc;
}

static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	*edata = bp->eee;
	if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
		edata->advertised = 0;
		edata->tx_lpi_enabled = 0;
	}

	if (!bp->eee.eee_active)
		edata->lp_advertised = 0;

	return 0;
}
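/* From the CLI this is driven by, e.g., "ethtool --set-eee <dev> eee on
 * tx-lpi on tx-timer <usecs> advertise <mask>".  Per the checks in
 * bnxt_set_eee() above, EEE is honoured only on single-PF ports with
 * autoneg enabled, and the LPI timer is range-checked only when firmware
 * reports lpi_tmr_lo/lpi_tmr_hi bounds.
 */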
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u16 start_addr,
					    u16 data_length, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc, byte_offset = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = i2c_addr;
	req.page_number = cpu_to_le16(page_number);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req.page_offset = cpu_to_le16(start_addr + byte_offset);
		req.data_length = xfer_size;
		req.enables = cpu_to_le32(start_addr + byte_offset ?
				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		mutex_unlock(&bp->hwrm_cmd_lock);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);

	return rc;
}

static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}

static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
						      start, length, data);
	}
	return rc;
}
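/* "ethtool -m <dev>" exercises the two functions above.  The split at
 * ETH_MODULE_SFF_8436_LEN (256 bytes) follows SFF-8472 addressing: the
 * first 256 bytes live at I2C address A0h and the diagnostic area at A2h
 * (read here as page 1), which is also why an SFP without diagnostic
 * support is reported with the shorter 8436-style EEPROM length.
 */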
static int bnxt_nway_reset(struct net_device *dev)
{
	int rc = 0;

	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
		return -EINVAL;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, true, false);

	return rc;
}

static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int i, rc;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_test_irq(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
		int rc;

		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
		if (rc)
			return rc;
	}
	return 0;
}

static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
	struct hwrm_port_mac_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);

	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
	if (enable)
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
	else
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_phy_qcaps_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg)
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (netif_carrier_ok(bp->dev))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}

static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, &req);
		if (ext)
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
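/* The loopback helpers below receive and validate the self-test frame that
 * bnxt_run_loopback() (further down) transmits: broadcast destination, the
 * port's own MAC as source, and a payload of incrementing bytes (i & 0xff),
 * with the frame size capped at rx_copy_thresh so the whole packet is
 * visible through the copy path.
 */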
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}

static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}

static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	eth_broadcast_addr(data);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
	return rc;
}

static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_exec_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	resp->test_success = 0;
	req.flags = test_mask;
	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
	*test_results = resp->test_success;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
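/* Result layout for "ethtool -t <dev> [online|offline|external_lb]": the
 * first (num_tests - BNXT_DRV_TESTS) slots are firmware self-tests run via
 * HWRM_SELFTEST_EXEC, and the last four, indexed by the macros below, are
 * driver-run tests (MAC, PHY and external loopback, plus IRQ).  A slot
 * reads 1 on failure and 0 on pass.
 */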
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)

static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc, i;

	if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
			return;
		}
		offline = true;
	}

	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		rc = bnxt_close_nic(bp, false, false);
		if (rc)
			return;
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		bnxt_open_nic(bp, false, true);
	}
	if (bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}

static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if (*flags == ETH_RESET_ALL) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code < 0x10803)
			return -EOPNOTSUPP;

		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
		if (!rc) {
			netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
			*flags = 0;
		}
	} else if (*flags == ETH_RESET_AP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code < 0x10803)
			return -EOPNOTSUPP;

		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
		if (!rc) {
			netdev_info(dev, "Reset Application Processor request successful\n");
			*flags = 0;
		}
	} else {
		rc = -EINVAL;
	}

	return rc;
}
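/* The coredump plumbing below backs "ethtool -w <dev>" (length and version
 * query) and "ethtool -w <dev> data <file>".  Retrieval is a chunked DMA
 * protocol: the same HWRM request is re-sent with an incrementing seq_no
 * for as long as the firmware keeps HWRM_DBG_CMN_FLAGS_MORE set, and each
 * chunk is appended to the destination buffer.
 */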
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
				  struct bnxt_hwrm_dbg_dma_info *info)
{
	struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_dbg_cmn_input *cmn_req = msg;
	__le16 *seq_ptr = msg + info->seq_off;
	u16 seq = 0, len, segs_off;
	void *resp = cmn_resp;
	dma_addr_t dma_handle;
	int rc, off = 0;
	void *dma_buf;

	dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
				     GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
			    total_segments);
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
	mutex_lock(&bp->hwrm_cmd_lock);
	while (1) {
		*seq_ptr = cpu_to_le16(seq);
		rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
		if (!seq &&
		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
			info->segs = le16_to_cpu(*((__le16 *)(resp +
							      segs_off)));
			if (!info->segs) {
				rc = -EIO;
				break;
			}

			info->dest_buf_size = info->segs *
					sizeof(struct coredump_segment_record);
			info->dest_buf = kmalloc(info->dest_buf_size,
						 GFP_KERNEL);
			if (!info->dest_buf) {
				rc = -ENOMEM;
				break;
			}
		}

		if (info->dest_buf)
			memcpy(info->dest_buf + off, dma_buf, len);

		if (cmn_req->req_type ==
				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
			info->dest_buf_size += len;

		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
			break;

		seq++;
		off += len;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
	return rc;
}

static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
				       struct bnxt_coredump *coredump)
{
	struct hwrm_dbg_coredump_list_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);

	info.dma_len = COREDUMP_LIST_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
				     data_len);

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc) {
		coredump->data = info.dest_buf;
		coredump->data_size = info.dest_buf_size;
		coredump->total_segs = info.segs;
	}
	return rc;
}

static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
					   u16 segment_id)
{
	struct hwrm_dbg_coredump_initiate_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
	req.component_id = cpu_to_le16(component_id);
	req.segment_id = cpu_to_le16(segment_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
					   u16 segment_id, u32 *seg_len,
					   void *buf, u32 offset)
{
	struct hwrm_dbg_coredump_retrieve_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
	req.component_id = cpu_to_le16(component_id);
	req.segment_id = cpu_to_le16(segment_id);

	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
				seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
				     data_len);
	if (buf)
		info.dest_buf = buf + offset;

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc)
		*seg_len = info.dest_buf_size;

	return rc;
}
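/* On-disk layout produced by bnxt_get_coredump() (further down) using the
 * two fill helpers below: a "sEgM" segment header precedes every segment
 * (the first segment is the raw hwrm_ver_get response, tagged component 2,
 * segment 0), and a single "cOrE" bnxt_coredump_record trailer closes the
 * file with timestamps, OS details and the overall status.
 */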
static void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
			   struct bnxt_coredump_segment_hdr *seg_hdr,
			   struct coredump_segment_record *seg_rec, u32 seg_len,
			   int status, u32 duration, u32 instance)
{
	memset(seg_hdr, 0, sizeof(*seg_hdr));
	memcpy(seg_hdr->signature, "sEgM", 4);
	if (seg_rec) {
		seg_hdr->component_id = (__force __le32)seg_rec->component_id;
		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
		seg_hdr->low_version = seg_rec->version_low;
		seg_hdr->high_version = seg_rec->version_hi;
	} else {
		/* For hwrm_ver_get response Component id = 2
		 * and Segment id = 0
		 */
		seg_hdr->component_id = cpu_to_le32(2);
		seg_hdr->segment_id = 0;
	}
	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
	seg_hdr->length = cpu_to_le32(seg_len);
	seg_hdr->status = cpu_to_le32(status);
	seg_hdr->duration = cpu_to_le32(duration);
	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
	seg_hdr->instance = cpu_to_le32(instance);
}

static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
			  time64_t start, s16 start_utc, u16 total_segs,
			  int status)
{
	time64_t end = ktime_get_real_seconds();
	u32 os_ver_major = 0, os_ver_minor = 0;
	struct tm tm;

	time64_to_tm(start, 0, &tm);
	memset(record, 0, sizeof(*record));
	memcpy(record->signature, "cOrE", 4);
	record->flags = 0;
	record->low_version = 0;
	record->high_version = 1;
	record->asic_state = 0;
	strlcpy(record->system_name, utsname()->nodename,
		sizeof(record->system_name));
	record->year = cpu_to_le16(tm.tm_year + 1900);
	record->month = cpu_to_le16(tm.tm_mon + 1);
	record->day = cpu_to_le16(tm.tm_mday);
	record->hour = cpu_to_le16(tm.tm_hour);
	record->minute = cpu_to_le16(tm.tm_min);
	record->second = cpu_to_le16(tm.tm_sec);
	record->utc_bias = cpu_to_le16(start_utc);
	strcpy(record->commandline, "ethtool -w");
	record->total_segments = cpu_to_le32(total_segs);

	sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
	record->os_ver_major = cpu_to_le32(os_ver_major);
	record->os_ver_minor = cpu_to_le32(os_ver_minor);

	strlcpy(record->os_name, utsname()->sysname, 32);
	time64_to_tm(end, 0, &tm);
	record->end_year = cpu_to_le16(tm.tm_year + 1900);
	record->end_month = cpu_to_le16(tm.tm_mon + 1);
	record->end_day = cpu_to_le16(tm.tm_mday);
	record->end_hour = cpu_to_le16(tm.tm_hour);
	record->end_minute = cpu_to_le16(tm.tm_min);
	record->end_second = cpu_to_le16(tm.tm_sec);
	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
				       bp->ver_resp.chip_rev << 8 |
				       bp->ver_resp.chip_metal);
	record->asic_id2 = 0;
	record->coredump_status = cpu_to_le32(status);
	record->ioctl_low_version = 0;
	record->ioctl_high_version = 0;
}
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
	struct coredump_segment_record *seg_record = NULL;
	u32 offset = 0, seg_hdr_len, seg_record_len;
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_coredump coredump = {NULL};
	time64_t start_time;
	u16 start_utc;
	int rc = 0, i;

	start_time = ktime_get_real_seconds();
	start_utc = sys_tz.tz_minuteswest * 60;
	seg_hdr_len = sizeof(seg_hdr);

	/* First segment should be hwrm_ver_get response */
	*dump_len = seg_hdr_len + ver_get_resp_len;
	if (buf) {
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
					   0, 0, 0);
		memcpy(buf + offset, &seg_hdr, seg_hdr_len);
		offset += seg_hdr_len;
		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
		offset += ver_get_resp_len;
	}

	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
	if (rc) {
		netdev_err(bp->dev, "Failed to get coredump segment list\n");
		goto err;
	}

	*dump_len += seg_hdr_len * coredump.total_segs;

	seg_record = (struct coredump_segment_record *)coredump.data;
	seg_record_len = sizeof(*seg_record);

	for (i = 0; i < coredump.total_segs; i++) {
		u16 comp_id = le16_to_cpu(seg_record->component_id);
		u16 seg_id = le16_to_cpu(seg_record->segment_id);
		u32 duration = 0, seg_len = 0;
		unsigned long start, end;

		start = jiffies;

		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
		if (rc) {
			netdev_err(bp->dev,
				   "Failed to initiate coredump for seg = %d\n",
				   seg_id);
			goto next_seg;
		}

		/* Write segment data into the buffer */
		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
						     &seg_len, buf,
						     offset + seg_hdr_len);
		if (rc)
			netdev_err(bp->dev,
				   "Failed to retrieve coredump for seg = %d\n",
				   seg_id);

next_seg:
		end = jiffies;
		duration = jiffies_to_msecs(end - start);
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
					   rc, duration, 0);

		if (buf) {
			/* Write segment header into the buffer */
			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
			offset += seg_hdr_len + seg_len;
		}

		*dump_len += seg_len;
		seg_record =
			(struct coredump_segment_record *)((u8 *)seg_record +
							   seg_record_len);
	}

err:
	if (buf)
		bnxt_fill_coredump_record(bp, buf + offset, start_time,
					  start_utc, coredump.total_segs + 1,
					  rc);
	kfree(coredump.data);
	*dump_len += sizeof(struct bnxt_coredump_record);

	return rc;
}

static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
			bp->ver_resp.hwrm_fw_min_8b << 16 |
			bp->ver_resp.hwrm_fw_bld_8b << 8 |
			bp->ver_resp.hwrm_fw_rsvd_8b;

	return bnxt_get_coredump(bp, NULL, &dump->len);
}

static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	return bnxt_get_coredump(bp, buf, &dump->len);
}
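/* bnxt_get_coredump() runs twice per dump: bnxt_get_dump_flag() above
 * passes a NULL buffer purely to size the dump (only *dump_len is
 * accumulated), and bnxt_get_dump_data() then passes the buffer that the
 * ethtool core allocated from that length.
 */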
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_qlist_input req = {0};
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto ethtool_init_exit;

	test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
	if (!test_info)
		goto ethtool_init_exit;

	bp->test_info = test_info;
	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test0_name + i * 32;

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt test (offline)");
		} else {
			strlcpy(str, fw_str, ETH_GSTRING_LEN);
			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
			if (test_info->offline_mask & (1 << i))
				strncat(str, " (offline)",
					ETH_GSTRING_LEN - strlen(str));
			else
				strncat(str, " (online)",
					ETH_GSTRING_LEN - strlen(str));
		}
	}

ethtool_init_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}
const struct ethtool_ops bnxt_ethtool_ops = {
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size	= bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size	= bnxt_get_rxfh_key_size,
	.get_rxfh		= bnxt_get_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len		= bnxt_get_eeprom_len,
	.get_eeprom		= bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.reset			= bnxt_reset,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
};