/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}
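/* Apply user coalescing settings.  Enabling adaptive RX coalescing turns
 * on DIM.  The stats block interval is clamped and rounded down to the
 * minimum tick granularity; changing it requires a close/open cycle of
 * the NIC, while plain coalescing changes only need an HWRM command.
 */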
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (netif_running(dev)) {
		if (update_stats) {
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

#define BNXT_NUM_STATS	22

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

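/* Per-priority stats are not stored contiguously in the extended port
 * stats.  Each PRI entry therefore records the cos0 offset of its
 * counter as a base; the CoS queue actually carrying that priority is
 * resolved at read time by adding the bp->pri2cos[] mapping.
 */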
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
};

static struct {
	u64 counter;
	char string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
	{0, "rx_total_discard_pkts"},
	{0, "tx_total_discard_pkts"},
};
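/* Hardware port statistics: each entry pairs the 64-bit counter's
 * offset within struct rx_port_stats/tx_port_stats with the string
 * reported to ethtool.
 */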
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;

	num_stats += BNXT_NUM_SW_FUNC_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		num_stats += bp->fw_rx_stats_ext_size +
			     bp->fw_tx_stats_ext_size;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}
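/* Fill the stats buffer in exactly the order advertised by
 * bnxt_get_strings(): per-ring hardware stats plus two software
 * counters per ring, then the software function totals, then the
 * optional port and extended port stats.  If the rings are not
 * allocated, the index is simply advanced past the ring portion.
 */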
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;

	if (!bp->bnapi) {
		j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
		goto skip_ring_stats;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
		bnxt_sw_func_stats[i].counter = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
		int k;

		for (k = 0; k < stat_fields; j++, k++)
			buf[j] = le64_to_cpu(hw_stats[k]);
		buf[j++] = cpr->rx_l4_csum_errors;
		buf[j++] = cpr->missed_irqs;

		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
			le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
			le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
		buf[j] = bnxt_sw_func_stats[i].counter;

skip_ring_stats:
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
			buf[j] = le64_to_cpu(*(port_stats +
					       bnxt_port_stats_arr[i].offset));
		}
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		__le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
		__le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;

		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
			buf[j] = le64_to_cpu(*(rx_port_stats_ext +
					    bnxt_port_stats_ext_arr[i].offset));
		}
		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
			buf[j] = le64_to_cpu(*(tx_port_stats_ext +
					bnxt_tx_port_stats_ext_arr[i].offset));
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
			}
		}
	}
}
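/* Emit the stat name strings in the same order that
 * bnxt_get_ethtool_stats() fills the counters.
 */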
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i;

	switch (stringset) {
	/* The number of strings must match BNXT_NUM_STATS defined above. */
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			sprintf(buf, "[%d]: rx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_events", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_aborts", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: missed_irqs", i);
			buf += ETH_GSTRING_LEN;
		}
		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
			strcpy(buf, bnxt_sw_func_stats[i].string);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;
}
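/* The TX ring must be able to hold at least one maximally fragmented
 * skb, hence the MAX_SKB_FRAGS check below.
 */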
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}
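/* Channel counts are either combined or separate rx/tx, never both.
 * XDP requires combined mode because one XDP TX ring is allocated per
 * RX ring.
 */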
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * to re-enable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp);
	}

	return rc;
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}
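/* Look up an ntuple filter by sw_id under RCU and translate its
 * flow_keys into an ethtool flow spec.  Only TCP/UDP over IPv4/IPv6 is
 * reported, always with fully specified (all-ones) masks.
 */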
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
#endif

static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
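/* Set the RSS hash policy for a flow type.  Only three settings are
 * accepted: 4-tuple (addresses + ports), 2-tuple (addresses only), or
 * none.  4-tuple is limited to TCP, plus UDP when the firmware
 * advertises UDP RSS capability; any change is applied with a
 * close/open cycle of the NIC.
 */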
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
#ifdef CONFIG_RFS_ACCEL
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;
#endif

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		rc = bnxt_srxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic;
	int i = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[0];
	if (indir && vnic->rss_table) {
		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
			indir[i] = le16_to_cpu(vnic->rss_table[i]);
	}

	if (key && vnic->rss_hash_key)
		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
	u32 speed_mask = 0;

	/* TODO: support 25GB, 40GB, 50GB with different cable type */
	/* set the advertised speeds */
	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		speed_mask |= ADVERTISED_100baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		speed_mask |= ADVERTISED_1000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		speed_mask |= ADVERTISED_2500baseX_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		speed_mask |= ADVERTISED_10000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		speed_mask |= ADVERTISED_40000baseCR4_Full;

	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
		speed_mask |= ADVERTISED_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_TX)
		speed_mask |= ADVERTISED_Asym_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_RX)
		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return speed_mask;
}
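/* Translate the firmware's link speed bitmask into ethtool link modes
 * (and back).  The pause bits follow the ethtool convention: Pause
 * means symmetric flow control and Asym_Pause modifies it, so RX-only
 * is Pause + Asym_Pause and TX-only is Asym_Pause alone.
 */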
#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}

#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}

static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->advertising;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->auto_pause_setting;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
}

static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->lp_auto_link_speeds;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->lp_pause;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
				lp_advertising);
}

static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->support_speeds;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);

	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
					     Asym_Pause);

	if (link_info->support_auto_speeds)
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     Autoneg);
}
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
		return SPEED_100000;
	default:
		return SPEED_UNKNOWN;
	}
}

static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		if (!netif_carrier_ok(dev))
			base->duplex = DUPLEX_UNKNOWN;
		else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		else
			base->duplex = DUPLEX_HALF;
	} else {
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}
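/* Map a forced ethtool speed to the firmware speed constant, but only
 * if the PHY actually supports that speed; 0 means unsupported.
 */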
static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_spds = link_info->support_speeds;
	u32 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
		break;
	case SPEED_100000:
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
		break;
	default:
		netdev_err(dev, "unsupported speed!\n");
		break;
	}
	return fw_speed;
}

u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
	u16 fw_speed_mask = 0;

	/* only support autoneg at speed 100, 1000, and 10000 */
	if (advertising & (ADVERTISED_100baseT_Full |
			   ADVERTISED_100baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
	}
	if (advertising & (ADVERTISED_1000baseT_Full |
			   ADVERTISED_1000baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
	}
	if (advertising & ADVERTISED_10000baseT_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (advertising & ADVERTISED_40000baseCR4_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u16 fw_advertising = 0;
	u32 speed;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
					advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		if (!fw_advertising)
			link_info->advertising = link_info->support_auto_speeds;
		else
			link_info->advertising = fw_advertising;
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u16 fw_speed;
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		fw_speed = bnxt_get_fw_speed(dev, speed);
		if (!fw_speed) {
			rc = -EINVAL;
			goto set_setting_exit;
		}
		link_info->req_link_speed = fw_speed;
		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
		link_info->autoneg = 0;
		link_info->advertising = 0;
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
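/* Flow control: with autoneg, pause is negotiated by the firmware
 * (this requires speed autoneg to be enabled); forcing pause while
 * autoneg pause was active needs an explicit link change to take
 * effect.
 */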
static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}

static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
			return -EINVAL;

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		if (bp->hwrm_spec_code >= 0x10201)
			link_info->req_flow_ctrl =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);
	return rc;
}

static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return bp->link_info.link_up;
}

static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length);

static int bnxt_flash_nvram(struct net_device *dev,
			    u16 dir_type,
			    u16 dir_ordinal,
			    u16 dir_ext,
			    u16 dir_attr,
			    const u8 *data,
			    size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_write_input req = {0};
	dma_addr_t dma_handle;
	u8 *kmem;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);

	req.dir_type = cpu_to_le16(dir_type);
	req.dir_ordinal = cpu_to_le16(dir_ordinal);
	req.dir_ext = cpu_to_le16(dir_ext);
	req.dir_attr = cpu_to_le16(dir_attr);
	req.dir_data_length = cpu_to_le32(data_len);

	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
				  GFP_KERNEL);
	if (!kmem) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)data_len);
		return -ENOMEM;
	}
	memcpy(kmem, data, data_len);
	req.host_src_addr = cpu_to_le64(dma_handle);

	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);

	if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
		netdev_info(dev,
			    "PF does not have admin privileges to flash the device\n");
		rc = -EACCES;
	} else if (rc) {
		rc = -EIO;
	}
	return rc;
}
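/* Request a firmware self-reset of the processor that owns the given
 * directory entry type.  Bootcode and APE resets are deferred until the
 * next PCIe reset; a full chip reset is scheduled ASAP.
 */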
static int bnxt_firmware_reset(struct net_device *dev,
			       u16 dir_type)
{
	struct hwrm_fw_reset_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	case BNXT_FW_RESET_CHIP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
		break;
	case BNXT_FW_RESET_AP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
		break;
	default:
		return -EINVAL;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
		netdev_info(dev,
			    "PF does not have admin privileges to reset the device\n");
		rc = -EACCES;
	} else if (rc) {
		rc = -EIO;
	}
	return rc;
}
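/* Validate and flash an APE-style firmware image: check the header
 * signature, code type, and device family, then the trailing CRC32,
 * before writing to NVRAM and scheduling the matching self-reset.
 */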
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int	rc = 0;
	u16	code_type;
	u32	stored_crc;
	u32	calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);

	return rc;
}

static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}
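/* Fetch a file via request_firmware() and flash it according to its
 * directory entry type: APE-style binaries get full header/CRC checks,
 * other executables are validated by their microcode trailer, and
 * everything else is written raw.
 */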
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

static int bnxt_flash_package_from_file(struct net_device *dev,
					char *filename, u32 install_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_nvm_install_update_input install = {0};
	const struct firmware *fw;
	int rc, hwrm_err = 0;
	u32 item_len;
	u16 index;

	bnxt_hwrm_fw_set_time(bp);

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, &item_len, NULL) != 0) {
		netdev_err(dev, "PKG update area not created in nvram\n");
		return -ENOBUFS;
	}

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	if (fw->size > item_len) {
		netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
			   (unsigned long)fw->size);
		rc = -EFBIG;
	} else {
		dma_addr_t dma_handle;
		u8 *kmem;
		struct hwrm_nvm_modify_input modify = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);

		modify.dir_idx = cpu_to_le16(index);
		modify.len = cpu_to_le32(fw->size);

		kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
					  &dma_handle, GFP_KERNEL);
		if (!kmem) {
			netdev_err(dev,
				   "dma_alloc_coherent failure, length = %u\n",
				   (unsigned int)fw->size);
			rc = -ENOMEM;
		} else {
			memcpy(kmem, fw->data, fw->size);
			modify.host_src_addr = cpu_to_le64(dma_handle);

			hwrm_err = hwrm_send_message(bp, &modify,
						     sizeof(modify),
						     FLASH_PACKAGE_TIMEOUT);
			dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
					  dma_handle);
		}
	}
	release_firmware(fw);
	if (rc || hwrm_err)
		goto err_exit;

	/* An install_type carried in the upper 16 bits of the ethtool
	 * "region" word is shifted down before being sent to firmware.
	 */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	install.install_type = cpu_to_le32(install_type);

	mutex_lock(&bp->hwrm_cmd_lock);
	hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
				      INSTALL_PACKAGE_TIMEOUT);
	if (hwrm_err)
		goto flash_pkg_exit;

	if (resp->error_code) {
		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;

		if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
			install.flags |= cpu_to_le16(
				NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
			hwrm_err = _hwrm_send_message(bp, &install,
						      sizeof(install),
						      INSTALL_PACKAGE_TIMEOUT);
			if (hwrm_err)
				goto flash_pkg_exit;
		}
	}

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = -ENOPKG;
	}
flash_pkg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
	if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
		netdev_info(dev,
			    "PF does not have admin privileges to flash the device\n");
		rc = -EACCES;
	} else if (hwrm_err) {
		rc = -EOPNOTSUPP;
	}
	return rc;
}
static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
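/* Blob format note (added commentary): with "ethtool -e <dev>" at offset 0,
 * bnxt_get_nvram_directory() below returns the NVRAM directory as
 *
 *	data[0]  = number of directory entries
 *	data[1]  = size of each entry
 *	data[2+] = packed entries from HWRM_NVM_GET_DIR_ENTRIES, 0xff-padded
 *		   out to the requested length
 */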
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned int)buflen);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
	return rc;
}

static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			       u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input req = {0};

	if (!length)
		return -EINVAL;

	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned int)length);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	req.dir_idx = cpu_to_le16(index);
	req.offset = cpu_to_le32(offset);
	req.len = cpu_to_le32(length);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, length);
	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
	return rc;
}

static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_find_dir_entry_input req = {0};
	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
	req.enables = 0;
	req.dir_idx = 0;
	req.dir_type = cpu_to_le16(type);
	req.dir_ordinal = cpu_to_le16(ordinal);
	req.dir_ext = cpu_to_le16(ext);
	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char *retval = NULL;
	char *p;
	char *value;
	int field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = (char *)data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}
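/* Hedged format example (added note; the exact field order is defined by the
 * firmware package tooling): bnxt_parse_pkglog() above treats the PKG_LOG
 * item as newline-separated records of tab-separated fields, e.g.
 *
 *	field0 \t field1 \t ... \t fieldN \n
 *
 * and returns a pointer to the requested zero-based field of the final
 * record scanned; bnxt_get_pkgver() below asks for
 * BNX_PKG_LOG_FIELD_IDX_PKG_VERSION to append "/pkg <ver>" to fw_ver_str.
 */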
static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int len;

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, NULL, &pkglen) != 0)
		return;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return;
	}

	if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
		goto err;

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
			 "/pkg %s", pkgver);
	}
err:
	kfree(pkgbuf);
}

static int bnxt_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	u32 index;
	u32 offset;

	if (eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(dev, eeprom->len, data);

	index = eeprom->offset >> 24;
	offset = eeprom->offset & 0xffffff;

	if (index == 0) {
		netdev_err(dev, "unsupported index value: %d\n", index);
		return -EINVAL;
	}

	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}

static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_erase_dir_entry_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
	req.dir_idx = cpu_to_le16(index);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
				eeprom->len);
}
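/* Worked encoding example (added note, derived from the code above):
 * "ethtool -E" requests land in bnxt_set_eeprom() with the NVM item type
 * packed into 'magic' and the placement into 'offset':
 *
 *	type    = magic >> 16;		(0xffff selects directory ops)
 *	ext     = magic & 0xffff;
 *	ordinal = offset >> 16;
 *	attr    = offset & 0xffff;
 *
 * For the directory-erase op (type 0xffff, dir_op 0x0e), 'offset' must be
 * the bitwise complement of 'magic', as a guard against accidental erases.
 */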
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising =
		 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled) {
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			return -EINVAL;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		return -EINVAL;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

	return rc;
}

static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	*edata = bp->eee;
	if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
		edata->advertised = 0;
		edata->tx_lpi_enabled = 0;
	}

	if (!bp->eee.eee_active)
		edata->lp_advertised = 0;

	return 0;
}

static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u16 start_addr,
					    u16 data_length, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc, byte_offset = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = i2c_addr;
	req.page_number = cpu_to_le16(page_number);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req.page_offset = cpu_to_le16(start_addr + byte_offset);
		req.data_length = xfer_size;
		req.enables = cpu_to_le32(start_addr + byte_offset ?
				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		mutex_unlock(&bp->hwrm_cmd_lock);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);

	return rc;
}
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}

static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
						      start, length, data);
	}
	return rc;
}

static int bnxt_nway_reset(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
		return -EINVAL;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, true, false);

	return rc;
}

static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int i, rc;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_test_irq(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
		int rc;

		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
		if (rc)
			return rc;
	}
	return 0;
}
static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
	struct hwrm_port_mac_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);

	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
	if (enable)
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
	else
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_phy_qcaps_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg)
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (netif_carrier_ok(bp->dev))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}

static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, &req);
		if (ext)
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
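/* Test-frame sketch (added commentary): bnxt_run_loopback() below builds one
 * frame of min(MTU + ETH_HLEN, rx_copy_thresh) bytes:
 *
 *	bytes  0..5	ff:ff:ff:ff:ff:ff	(broadcast destination)
 *	bytes  6..11	the port's own dev_addr	(source)
 *	bytes 12..end	(u8)(i & 0xff)		(incrementing pattern)
 *
 * bnxt_rx_loopback() checks the looped-back completion against exactly this
 * pattern, so any corruption on the MAC/PHY/external path reads as -EIO.
 */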
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}

static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}

static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	eth_broadcast_addr(data);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
	return rc;
}

static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_exec_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	resp->test_success = 0;
	req.flags = test_mask;
	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
	*test_results = resp->test_success;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc, i;

	if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
			return;
		}
		offline = true;
	}

	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		rc = bnxt_close_nic(bp, false, false);
		if (rc)
			return;
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		bnxt_open_nic(bp, false, true);
	}
	if (bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
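/* Result-layout note (added commentary): bnxt_self_test() above reports one
 * u64 per test, 0 = pass and 1 = fail.  The first
 * bp->num_tests - BNXT_DRV_TESTS slots mirror the firmware-run tests from
 * HWRM_SELFTEST_EXEC; the final four are driver-run tests at
 * BNXT_MACLPBK_TEST_IDX, BNXT_PHYLPBK_TEST_IDX, BNXT_EXTLPBK_TEST_IDX and
 * BNXT_IRQ_TEST_IDX.
 */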
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if (*flags == ETH_RESET_ALL) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code < 0x10803)
			return -EOPNOTSUPP;

		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
		if (!rc) {
			netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
			*flags = 0;
		}
	} else if (*flags == ETH_RESET_AP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code < 0x10803)
			return -EOPNOTSUPP;

		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
		if (!rc) {
			netdev_info(dev, "Reset Application Processor request successful.\n");
			*flags = 0;
		}
	} else {
		rc = -EINVAL;
	}

	return rc;
}

static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
				  struct bnxt_hwrm_dbg_dma_info *info)
{
	struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_dbg_cmn_input *cmn_req = msg;
	__le16 *seq_ptr = msg + info->seq_off;
	u16 seq = 0, len, segs_off;
	void *resp = cmn_resp;
	dma_addr_t dma_handle;
	int rc, off = 0;
	void *dma_buf;

	dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
				     GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
			    total_segments);
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
	mutex_lock(&bp->hwrm_cmd_lock);
	while (1) {
		*seq_ptr = cpu_to_le16(seq);
		rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
		if (!seq &&
		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
			info->segs = le16_to_cpu(*((__le16 *)(resp +
							      segs_off)));
			if (!info->segs) {
				rc = -EIO;
				break;
			}

			info->dest_buf_size = info->segs *
					sizeof(struct coredump_segment_record);
			info->dest_buf = kmalloc(info->dest_buf_size,
						 GFP_KERNEL);
			if (!info->dest_buf) {
				rc = -ENOMEM;
				break;
			}
		}

		if (info->dest_buf)
			memcpy(info->dest_buf + off, dma_buf, len);

		if (cmn_req->req_type ==
				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
			info->dest_buf_size += len;

		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
			break;

		seq++;
		off += len;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
	return rc;
}

static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
				       struct bnxt_coredump *coredump)
{
	struct hwrm_dbg_coredump_list_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);

	info.dma_len = COREDUMP_LIST_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
				     data_len);

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc) {
		coredump->data = info.dest_buf;
		coredump->data_size = info.dest_buf_size;
		coredump->total_segs = info.segs;
	}
	return rc;
}
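/* Hedged protocol sketch (added note): bnxt_hwrm_dbg_dma_data() above pulls
 * a coredump object across in DMA-sized chunks:
 *
 *	seq = 0;
 *	do {
 *		patch seq into the request and send it;
 *		firmware DMAs up to info->dma_len bytes, reports data_len;
 *		append the chunk to info->dest_buf; seq++;
 *	} while (resp->flags & HWRM_DBG_CMN_FLAGS_MORE);
 *
 * The LIST variant also returns total_segments on the first chunk so the
 * caller can size the destination buffer up front.
 */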
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
					   u16 segment_id)
{
	struct hwrm_dbg_coredump_initiate_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
	req.component_id = cpu_to_le16(component_id);
	req.segment_id = cpu_to_le16(segment_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
					    u16 segment_id, u32 *seg_len,
					    void *buf, u32 offset)
{
	struct hwrm_dbg_coredump_retrieve_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
	req.component_id = cpu_to_le16(component_id);
	req.segment_id = cpu_to_le16(segment_id);

	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
				seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
				     data_len);
	if (buf)
		info.dest_buf = buf + offset;

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc)
		*seg_len = info.dest_buf_size;

	return rc;
}

static void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
			   struct bnxt_coredump_segment_hdr *seg_hdr,
			   struct coredump_segment_record *seg_rec, u32 seg_len,
			   int status, u32 duration, u32 instance)
{
	memset(seg_hdr, 0, sizeof(*seg_hdr));
	memcpy(seg_hdr->signature, "sEgM", 4);
	if (seg_rec) {
		seg_hdr->component_id = (__force __le32)seg_rec->component_id;
		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
		seg_hdr->low_version = seg_rec->version_low;
		seg_hdr->high_version = seg_rec->version_hi;
	} else {
		/* For hwrm_ver_get response Component id = 2
		 * and Segment id = 0
		 */
		seg_hdr->component_id = cpu_to_le32(2);
		seg_hdr->segment_id = 0;
	}
	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
	seg_hdr->length = cpu_to_le32(seg_len);
	seg_hdr->status = cpu_to_le32(status);
	seg_hdr->duration = cpu_to_le32(duration);
	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
	seg_hdr->instance = cpu_to_le32(instance);
}

static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
			  time64_t start, s16 start_utc, u16 total_segs,
			  int status)
{
	time64_t end = ktime_get_real_seconds();
	u32 os_ver_major = 0, os_ver_minor = 0;
	struct tm tm;

	time64_to_tm(start, 0, &tm);
	memset(record, 0, sizeof(*record));
	memcpy(record->signature, "cOrE", 4);
	record->flags = 0;
	record->low_version = 0;
	record->high_version = 1;
	record->asic_state = 0;
	strlcpy(record->system_name, utsname()->nodename,
		sizeof(record->system_name));
	record->year = cpu_to_le16(tm.tm_year + 1900);
	record->month = cpu_to_le16(tm.tm_mon + 1);
	record->day = cpu_to_le16(tm.tm_mday);
	record->hour = cpu_to_le16(tm.tm_hour);
	record->minute = cpu_to_le16(tm.tm_min);
	record->second = cpu_to_le16(tm.tm_sec);
	record->utc_bias = cpu_to_le16(start_utc);
	strcpy(record->commandline, "ethtool -w");
	record->total_segments = cpu_to_le32(total_segs);

	sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
	record->os_ver_major = cpu_to_le32(os_ver_major);
	record->os_ver_minor = cpu_to_le32(os_ver_minor);

	strlcpy(record->os_name, utsname()->sysname, 32);
	time64_to_tm(end, 0, &tm);
	record->end_year = cpu_to_le16(tm.tm_year + 1900);
	record->end_month = cpu_to_le16(tm.tm_mon + 1);
	record->end_day = cpu_to_le16(tm.tm_mday);
	record->end_hour = cpu_to_le16(tm.tm_hour);
	record->end_minute = cpu_to_le16(tm.tm_min);
	record->end_second = cpu_to_le16(tm.tm_sec);
	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
				       bp->ver_resp.chip_rev << 8 |
				       bp->ver_resp.chip_metal);
	record->asic_id2 = 0;
	record->coredump_status = cpu_to_le32(status);
	record->ioctl_low_version = 0;
	record->ioctl_high_version = 0;
}
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
	struct coredump_segment_record *seg_record = NULL;
	u32 offset = 0, seg_hdr_len, seg_record_len;
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_coredump coredump = {NULL};
	time64_t start_time;
	u16 start_utc;
	int rc = 0, i;

	start_time = ktime_get_real_seconds();
	start_utc = sys_tz.tz_minuteswest * 60;
	seg_hdr_len = sizeof(seg_hdr);

	/* First segment should be hwrm_ver_get response */
	*dump_len = seg_hdr_len + ver_get_resp_len;
	if (buf) {
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
					   0, 0, 0);
		memcpy(buf + offset, &seg_hdr, seg_hdr_len);
		offset += seg_hdr_len;
		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
		offset += ver_get_resp_len;
	}

	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
	if (rc) {
		netdev_err(bp->dev, "Failed to get coredump segment list\n");
		goto err;
	}

	*dump_len += seg_hdr_len * coredump.total_segs;

	seg_record = (struct coredump_segment_record *)coredump.data;
	seg_record_len = sizeof(*seg_record);

	for (i = 0; i < coredump.total_segs; i++) {
		u16 comp_id = le16_to_cpu(seg_record->component_id);
		u16 seg_id = le16_to_cpu(seg_record->segment_id);
		u32 duration = 0, seg_len = 0;
		unsigned long start, end;

		start = jiffies;

		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
		if (rc) {
			netdev_err(bp->dev,
				   "Failed to initiate coredump for seg = %d\n",
				   seg_record->segment_id);
			goto next_seg;
		}

		/* Write segment data into the buffer */
		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
						     &seg_len, buf,
						     offset + seg_hdr_len);
		if (rc)
			netdev_err(bp->dev,
				   "Failed to retrieve coredump for seg = %d\n",
				   seg_record->segment_id);

next_seg:
		end = jiffies;
		duration = jiffies_to_msecs(end - start);
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
					   rc, duration, 0);

		if (buf) {
			/* Write segment header into the buffer */
			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
			offset += seg_hdr_len + seg_len;
		}

		*dump_len += seg_len;
		seg_record =
			(struct coredump_segment_record *)((u8 *)seg_record +
							   seg_record_len);
	}

err:
	if (buf)
		bnxt_fill_coredump_record(bp, buf + offset, start_time,
					  start_utc, coredump.total_segs + 1,
					  rc);
	kfree(coredump.data);
	*dump_len += sizeof(struct bnxt_coredump_record);

	return rc;
}
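/* Overall dump layout (illustrative, pieced together from the code above):
 *
 *	"sEgM" seg_hdr | hwrm_ver_get response
 *	"sEgM" seg_hdr | segment 0 data
 *	   ...
 *	"sEgM" seg_hdr | segment N data
 *	"cOrE" bnxt_coredump_record trailer
 *
 * This is the byte stream that "ethtool -w <dev> data <file>" retrieves via
 * bnxt_get_dump_data() below.
 */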
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
			bp->ver_resp.hwrm_fw_min_8b << 16 |
			bp->ver_resp.hwrm_fw_bld_8b << 8 |
			bp->ver_resp.hwrm_fw_rsvd_8b;

	return bnxt_get_coredump(bp, NULL, &dump->len);
}

static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	return bnxt_get_coredump(bp, buf, &dump->len);
}

void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_qlist_input req = {0};
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	bnxt_get_pkgver(dev);

	if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto ethtool_init_exit;

	test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
	if (!test_info)
		goto ethtool_init_exit;

	bp->test_info = test_info;
	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test0_name + i * 32;

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			strlcpy(str, fw_str, ETH_GSTRING_LEN);
			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
			if (test_info->offline_mask & (1 << i))
				strncat(str, " (offline)",
					ETH_GSTRING_LEN - strlen(str));
			else
				strncat(str, " (online)",
					ETH_GSTRING_LEN - strlen(str));
		}
	}

ethtool_init_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}

const struct ethtool_ops bnxt_ethtool_ops = {
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size	= bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size	= bnxt_get_rxfh_key_size,
	.get_rxfh		= bnxt_get_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len		= bnxt_get_eeprom_len,
	.get_eeprom		= bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.reset			= bnxt_reset,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
};