/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (netif_running(dev)) {
		if (update_stats) {
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES	\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)	\

#define BNXT_TX_STATS_EXT_COS_ENTRIES	\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)	\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

static struct {
	u64 counter;
	char string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
	{0, "rx_total_discard_pkts"},
	{0, "tx_total_discard_pkts"},
	{0, "rx_total_netpoll_discards"},
};

#define NUM_RING_RX_SW_STATS	ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS	ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS	ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS	ARRAY_SIZE(bnxt_ring_tx_stats_str)

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
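	/* Extended port counters: each entry's offset indexes into the
	 * firmware's rx_port_stats_ext block, and only the first
	 * bp->fw_rx_stats_ext_size entries are reported to ethtool.
	 */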
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2) {
			if (BNXT_CHIP_P5_THOR(bp))
				return BNXT_NUM_TPA_RING_STATS_P5;
			return BNXT_NUM_TPA_RING_STATS_P5_SR2;
		}
		return BNXT_NUM_TPA_RING_STATS;
	}
	return 0;
}

static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int rx, tx, cmn;

	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
	     bnxt_get_num_tpa_ring_stats(bp);
	tx = NUM_RING_TX_HW_STATS;
	cmn = NUM_RING_CMN_SW_STATS;
	return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
	       cmn * bp->cp_nr_rings;
}

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);

	num_stats += BNXT_NUM_SW_FUNC_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		num_stats += bp->fw_rx_stats_ext_size +
			     bp->fw_tx_stats_ext_size;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}

static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
	int tx_base = 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		tx_base = bp->rx_nr_rings;

	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
		return true;
	return false;
}

static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 tpa_stats;

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
		goto skip_ring_stats;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
		bnxt_sw_func_stats[i].counter = 0;

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			     j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		sw = (u64 *)&cpr->sw_stats.rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];

		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
		bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter +=
			cpr->sw_stats.rx.rx_netpoll_discards;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
		buf[j] = bnxt_sw_func_stats[i].counter;

skip_ring_stats:
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;

		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}

static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
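	/* Strings must be emitted in exactly the same order as the
	 * counter values filled in by bnxt_get_ethtool_stats() above.
	 */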
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
			strcpy(buf, bnxt_sw_func_stats[i].string);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
	} else {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
		ering->rx_jumbo_max_pending = 0;
	}
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = netdev_get_num_tc(dev);
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to re-enable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
#endif

static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
#ifdef CONFIG_RFS_ACCEL
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;
#endif

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		rc = bnxt_srxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic;
	u32 i, tbl_size;

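	/* Only the Toeplitz hash function is supported */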
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[0];
	if (indir && bp->rss_indir_tbl) {
		tbl_size = bnxt_get_rxfh_indir_size(dev);
		for (i = 0; i < tbl_size; i++)
			indir[i] = bp->rss_indir_tbl[i];
	}

	if (key && vnic->rss_hash_key)
		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (hfunc && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (indir) {
		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);

		for (i = 0; i < tbl_size; i++)
			bp->rss_indir_tbl[i] = indir[i];
		pad = bp->rss_indir_tbl_entries - tbl_size;
		if (pad)
			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
	}

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

static int bnxt_get_regs_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int reg_len;

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	reg_len = BNXT_PXP_REG_LEN;

	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
		reg_len += sizeof(struct pcie_ctx_hw_stats);

	return reg_len;
}

static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats) {
		hwrm_req_drop(bp, req);
		return;
	}

	regs->version = 1;
	hwrm_req_hold(bp, req); /* hold on to slice */
	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		__le64 *src = (__le64 *)hw_pcie_stats;
		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
		int i;

		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
			dst[i] = le64_to_cpu(src[i]);
	}
	hwrm_req_drop(bp, req);
}

static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
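	/* Only magic packet wake-up is supported; there is no SecureOn
	 * password, so always report an empty sopass.
	 */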
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
	u32 speed_mask = 0;

	/* TODO: support 25GB, 40GB, 50GB with different cable type */
	/* set the advertised speeds */
	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		speed_mask |= ADVERTISED_100baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		speed_mask |= ADVERTISED_1000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		speed_mask |= ADVERTISED_2500baseX_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		speed_mask |= ADVERTISED_10000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		speed_mask |= ADVERTISED_40000baseCR4_Full;

	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
		speed_mask |= ADVERTISED_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_TX)
		speed_mask |= ADVERTISED_Asym_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_RX)
		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return speed_mask;
}

#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}

#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}

#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     200000baseCR4_Full);\
}

#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  200000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB;		\
}

static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.advertising);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_RS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.advertising);
}

static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->advertising;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->auto_pause_setting;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
	fw_speeds = link_info->advertising_pam4;
	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
	bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
}

static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->lp_auto_link_speeds;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->lp_pause;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
				lp_advertising);
	fw_speeds = link_info->lp_auto_pam4_link_speeds;
	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
}

static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if (fec_cfg & BNXT_FEC_NONE) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.supported);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.supported);
}

static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->support_speeds;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
	fw_speeds = link_info->support_pam4_speeds;
	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);

	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
					     Asym_Pause);

	if (link_info->support_auto_speeds ||
	    link_info->support_pam4_auto_speeds)
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     Autoneg);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
}

u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
		return SPEED_100000;
	default:
		return SPEED_UNKNOWN;
	}
}

static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		base->duplex = DUPLEX_UNKNOWN;
		if (link_info->phy_link_status == BNXT_LINK_LINK) {
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
			if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
				base->duplex = DUPLEX_FULL;
			else
				base->duplex = DUPLEX_HALF;
		}
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
	} else {
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_200000:
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}

u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
	u16 fw_speed_mask = 0;

	/* only support autoneg at speed 100, 1000, and 10000 */
	if (advertising & (ADVERTISED_100baseT_Full |
			   ADVERTISED_100baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
	}
	if (advertising & (ADVERTISED_1000baseT_Full |
			   ADVERTISED_1000baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
	}
	if (advertising & ADVERTISED_10000baseT_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (advertising & ADVERTISED_40000baseCR4_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		link_info->advertising = 0;
		link_info->advertising_pam4 = 0;
		BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
					advertising);
		BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
					     lk_ksettings, advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		rc = bnxt_force_link_speed(dev, speed);
		if (rc) {
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

static int bnxt_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u8 active_fec;
	u16 fec_cfg;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	active_fec = link_info->active_fec_sig_mode &
link_info->active_fec_sig_mode & 1920 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 1921 if (fec_cfg & BNXT_FEC_NONE) { 1922 fec->fec = ETHTOOL_FEC_NONE; 1923 fec->active_fec = ETHTOOL_FEC_NONE; 1924 return 0; 1925 } 1926 if (fec_cfg & BNXT_FEC_AUTONEG) 1927 fec->fec |= ETHTOOL_FEC_AUTO; 1928 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 1929 fec->fec |= ETHTOOL_FEC_BASER; 1930 if (fec_cfg & BNXT_FEC_ENC_RS) 1931 fec->fec |= ETHTOOL_FEC_RS; 1932 if (fec_cfg & BNXT_FEC_ENC_LLRS) 1933 fec->fec |= ETHTOOL_FEC_LLRS; 1934 1935 switch (active_fec) { 1936 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 1937 fec->active_fec |= ETHTOOL_FEC_BASER; 1938 break; 1939 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 1940 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 1941 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 1942 fec->active_fec |= ETHTOOL_FEC_RS; 1943 break; 1944 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 1945 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 1946 fec->active_fec |= ETHTOOL_FEC_LLRS; 1947 break; 1948 } 1949 return 0; 1950 } 1951 1952 static void bnxt_get_fec_stats(struct net_device *dev, 1953 struct ethtool_fec_stats *fec_stats) 1954 { 1955 struct bnxt *bp = netdev_priv(dev); 1956 u64 *rx; 1957 1958 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 1959 return; 1960 1961 rx = bp->rx_port_stats_ext.sw_stats; 1962 fec_stats->corrected_bits.total = 1963 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); 1964 } 1965 1966 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, 1967 u32 fec) 1968 { 1969 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE; 1970 1971 if (fec & ETHTOOL_FEC_BASER) 1972 fw_fec |= BNXT_FEC_BASE_R_ON(link_info); 1973 else if (fec & ETHTOOL_FEC_RS) 1974 fw_fec |= BNXT_FEC_RS_ON(link_info); 1975 else if (fec & ETHTOOL_FEC_LLRS) 1976 fw_fec |= BNXT_FEC_LLRS_ON; 1977 return fw_fec; 1978 } 1979 1980 static int bnxt_set_fecparam(struct net_device *dev, 1981 struct ethtool_fecparam *fecparam) 1982 { 1983 struct hwrm_port_phy_cfg_input *req; 1984 struct bnxt *bp = netdev_priv(dev); 1985 struct bnxt_link_info *link_info; 1986 u32 new_cfg, fec = fecparam->fec; 1987 u16 fec_cfg; 1988 int rc; 1989 1990 link_info = &bp->link_info; 1991 fec_cfg = link_info->fec_cfg; 1992 if (fec_cfg & BNXT_FEC_NONE) 1993 return -EOPNOTSUPP; 1994 1995 if (fec & ETHTOOL_FEC_OFF) { 1996 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE | 1997 BNXT_FEC_ALL_OFF(link_info); 1998 goto apply_fec; 1999 } 2000 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) || 2001 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) || 2002 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) || 2003 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP))) 2004 return -EINVAL; 2005 2006 if (fec & ETHTOOL_FEC_AUTO) { 2007 if (!link_info->autoneg) 2008 return -EINVAL; 2009 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE; 2010 } else { 2011 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec); 2012 } 2013 2014 apply_fec: 2015 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 2016 if (rc) 2017 return rc; 2018 req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 2019 rc = hwrm_req_send(bp, req); 2020 /* update current settings */ 2021 if (!rc) { 2022 mutex_lock(&bp->link_lock); 2023 bnxt_update_link(bp, false); 2024 mutex_unlock(&bp->link_lock); 2025 } 2026 return rc; 2027 } 2028 2029 static void bnxt_get_pauseparam(struct net_device *dev, 2030 struct ethtool_pauseparam 
*epause) 2031 { 2032 struct bnxt *bp = netdev_priv(dev); 2033 struct bnxt_link_info *link_info = &bp->link_info; 2034 2035 if (BNXT_VF(bp)) 2036 return; 2037 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 2038 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX); 2039 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX); 2040 } 2041 2042 static void bnxt_get_pause_stats(struct net_device *dev, 2043 struct ethtool_pause_stats *epstat) 2044 { 2045 struct bnxt *bp = netdev_priv(dev); 2046 u64 *rx, *tx; 2047 2048 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 2049 return; 2050 2051 rx = bp->port_stats.sw_stats; 2052 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 2053 2054 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames); 2055 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames); 2056 } 2057 2058 static int bnxt_set_pauseparam(struct net_device *dev, 2059 struct ethtool_pauseparam *epause) 2060 { 2061 int rc = 0; 2062 struct bnxt *bp = netdev_priv(dev); 2063 struct bnxt_link_info *link_info = &bp->link_info; 2064 2065 if (!BNXT_PHY_CFG_ABLE(bp)) 2066 return -EOPNOTSUPP; 2067 2068 mutex_lock(&bp->link_lock); 2069 if (epause->autoneg) { 2070 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 2071 rc = -EINVAL; 2072 goto pause_exit; 2073 } 2074 2075 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 2076 if (bp->hwrm_spec_code >= 0x10201) 2077 link_info->req_flow_ctrl = 2078 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 2079 } else { 2080 /* when transition from auto pause to force pause, 2081 * force a link change 2082 */ 2083 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 2084 link_info->force_link_chng = true; 2085 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; 2086 link_info->req_flow_ctrl = 0; 2087 } 2088 if (epause->rx_pause) 2089 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; 2090 2091 if (epause->tx_pause) 2092 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; 2093 2094 if (netif_running(dev)) 2095 rc = bnxt_hwrm_set_pause(bp); 2096 2097 pause_exit: 2098 mutex_unlock(&bp->link_lock); 2099 return rc; 2100 } 2101 2102 static u32 bnxt_get_link(struct net_device *dev) 2103 { 2104 struct bnxt *bp = netdev_priv(dev); 2105 2106 /* TODO: handle MF, VF, driver close case */ 2107 return bp->link_info.link_up; 2108 } 2109 2110 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, 2111 struct hwrm_nvm_get_dev_info_output *nvm_dev_info) 2112 { 2113 struct hwrm_nvm_get_dev_info_output *resp; 2114 struct hwrm_nvm_get_dev_info_input *req; 2115 int rc; 2116 2117 if (BNXT_VF(bp)) 2118 return -EOPNOTSUPP; 2119 2120 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO); 2121 if (rc) 2122 return rc; 2123 2124 resp = hwrm_req_hold(bp, req); 2125 rc = hwrm_req_send(bp, req); 2126 if (!rc) 2127 memcpy(nvm_dev_info, resp, sizeof(*resp)); 2128 hwrm_req_drop(bp, req); 2129 return rc; 2130 } 2131 2132 static void bnxt_print_admin_err(struct bnxt *bp) 2133 { 2134 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n"); 2135 } 2136 2137 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 2138 u16 ext, u16 *index, u32 *item_length, 2139 u32 *data_length); 2140 2141 static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, 2142 u16 dir_ordinal, u16 dir_ext, u16 dir_attr, 2143 u32 dir_item_len, const u8 *data, 2144 size_t data_len) 2145 { 2146 struct bnxt *bp = netdev_priv(dev); 2147 struct hwrm_nvm_write_input *req; 2148 int rc; 2149 2150 rc = 
hwrm_req_init(bp, req, HWRM_NVM_WRITE); 2151 if (rc) 2152 return rc; 2153 2154 if (data_len && data) { 2155 dma_addr_t dma_handle; 2156 u8 *kmem; 2157 2158 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); 2159 if (!kmem) { 2160 hwrm_req_drop(bp, req); 2161 return -ENOMEM; 2162 } 2163 2164 req->dir_data_length = cpu_to_le32(data_len); 2165 2166 memcpy(kmem, data, data_len); 2167 req->host_src_addr = cpu_to_le64(dma_handle); 2168 } 2169 2170 hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT); 2171 req->dir_type = cpu_to_le16(dir_type); 2172 req->dir_ordinal = cpu_to_le16(dir_ordinal); 2173 req->dir_ext = cpu_to_le16(dir_ext); 2174 req->dir_attr = cpu_to_le16(dir_attr); 2175 req->dir_item_length = cpu_to_le32(dir_item_len); 2176 rc = hwrm_req_send(bp, req); 2177 2178 if (rc == -EACCES) 2179 bnxt_print_admin_err(bp); 2180 return rc; 2181 } 2182 2183 static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, 2184 u8 self_reset, u8 flags) 2185 { 2186 struct bnxt *bp = netdev_priv(dev); 2187 struct hwrm_fw_reset_input *req; 2188 int rc; 2189 2190 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 2191 if (rc) 2192 return rc; 2193 2194 req->embedded_proc_type = proc_type; 2195 req->selfrst_status = self_reset; 2196 req->flags = flags; 2197 2198 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { 2199 rc = hwrm_req_send_silent(bp, req); 2200 } else { 2201 rc = hwrm_req_send(bp, req); 2202 if (rc == -EACCES) 2203 bnxt_print_admin_err(bp); 2204 } 2205 return rc; 2206 } 2207 2208 static int bnxt_firmware_reset(struct net_device *dev, 2209 enum bnxt_nvm_directory_type dir_type) 2210 { 2211 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; 2212 u8 proc_type, flags = 0; 2213 2214 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ 2215 /* (e.g. 
when firmware isn't already running) */ 2216 switch (dir_type) { 2217 case BNX_DIR_TYPE_CHIMP_PATCH: 2218 case BNX_DIR_TYPE_BOOTCODE: 2219 case BNX_DIR_TYPE_BOOTCODE_2: 2220 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; 2221 /* Self-reset ChiMP upon next PCIe reset: */ 2222 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 2223 break; 2224 case BNX_DIR_TYPE_APE_FW: 2225 case BNX_DIR_TYPE_APE_PATCH: 2226 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; 2227 /* Self-reset APE upon next PCIe reset: */ 2228 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 2229 break; 2230 case BNX_DIR_TYPE_KONG_FW: 2231 case BNX_DIR_TYPE_KONG_PATCH: 2232 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; 2233 break; 2234 case BNX_DIR_TYPE_BONO_FW: 2235 case BNX_DIR_TYPE_BONO_PATCH: 2236 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; 2237 break; 2238 default: 2239 return -EINVAL; 2240 } 2241 2242 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); 2243 } 2244 2245 static int bnxt_firmware_reset_chip(struct net_device *dev) 2246 { 2247 struct bnxt *bp = netdev_priv(dev); 2248 u8 flags = 0; 2249 2250 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 2251 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 2252 2253 return bnxt_hwrm_firmware_reset(dev, 2254 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, 2255 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, 2256 flags); 2257 } 2258 2259 static int bnxt_firmware_reset_ap(struct net_device *dev) 2260 { 2261 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, 2262 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, 2263 0); 2264 } 2265 2266 static int bnxt_flash_firmware(struct net_device *dev, 2267 u16 dir_type, 2268 const u8 *fw_data, 2269 size_t fw_size) 2270 { 2271 int rc = 0; 2272 u16 code_type; 2273 u32 stored_crc; 2274 u32 calculated_crc; 2275 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; 2276 2277 switch (dir_type) { 2278 case BNX_DIR_TYPE_BOOTCODE: 2279 case BNX_DIR_TYPE_BOOTCODE_2: 2280 code_type = CODE_BOOT; 2281 break; 2282 case BNX_DIR_TYPE_CHIMP_PATCH: 2283 code_type = CODE_CHIMP_PATCH; 2284 break; 2285 case BNX_DIR_TYPE_APE_FW: 2286 code_type = CODE_MCTP_PASSTHRU; 2287 break; 2288 case BNX_DIR_TYPE_APE_PATCH: 2289 code_type = CODE_APE_PATCH; 2290 break; 2291 case BNX_DIR_TYPE_KONG_FW: 2292 code_type = CODE_KONG_FW; 2293 break; 2294 case BNX_DIR_TYPE_KONG_PATCH: 2295 code_type = CODE_KONG_PATCH; 2296 break; 2297 case BNX_DIR_TYPE_BONO_FW: 2298 code_type = CODE_BONO_FW; 2299 break; 2300 case BNX_DIR_TYPE_BONO_PATCH: 2301 code_type = CODE_BONO_PATCH; 2302 break; 2303 default: 2304 netdev_err(dev, "Unsupported directory entry type: %u\n", 2305 dir_type); 2306 return -EINVAL; 2307 } 2308 if (fw_size < sizeof(struct bnxt_fw_header)) { 2309 netdev_err(dev, "Invalid firmware file size: %u\n", 2310 (unsigned int)fw_size); 2311 return -EINVAL; 2312 } 2313 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { 2314 netdev_err(dev, "Invalid firmware signature: %08X\n", 2315 le32_to_cpu(header->signature)); 2316 return -EINVAL; 2317 } 2318 if (header->code_type != code_type) { 2319 netdev_err(dev, "Expected firmware type: %d, read: %d\n", 2320 code_type, header->code_type); 2321 return -EINVAL; 2322 } 2323 if (header->device != DEVICE_CUMULUS_FAMILY) { 2324 netdev_err(dev, "Expected firmware device family %d, read: %d\n", 2325 DEVICE_CUMULUS_FAMILY, header->device); 2326 return -EINVAL; 2327 } 2328 /* Confirm the CRC32 checksum of the file: */ 2329 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 2330 
sizeof(stored_crc))); 2331 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 2332 if (calculated_crc != stored_crc) { 2333 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n", 2334 (unsigned long)stored_crc, 2335 (unsigned long)calculated_crc); 2336 return -EINVAL; 2337 } 2338 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2339 0, 0, 0, fw_data, fw_size); 2340 if (rc == 0) /* Firmware update successful */ 2341 rc = bnxt_firmware_reset(dev, dir_type); 2342 2343 return rc; 2344 } 2345 2346 static int bnxt_flash_microcode(struct net_device *dev, 2347 u16 dir_type, 2348 const u8 *fw_data, 2349 size_t fw_size) 2350 { 2351 struct bnxt_ucode_trailer *trailer; 2352 u32 calculated_crc; 2353 u32 stored_crc; 2354 int rc = 0; 2355 2356 if (fw_size < sizeof(struct bnxt_ucode_trailer)) { 2357 netdev_err(dev, "Invalid microcode file size: %u\n", 2358 (unsigned int)fw_size); 2359 return -EINVAL; 2360 } 2361 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size - 2362 sizeof(*trailer))); 2363 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) { 2364 netdev_err(dev, "Invalid microcode trailer signature: %08X\n", 2365 le32_to_cpu(trailer->sig)); 2366 return -EINVAL; 2367 } 2368 if (le16_to_cpu(trailer->dir_type) != dir_type) { 2369 netdev_err(dev, "Expected microcode type: %d, read: %d\n", 2370 dir_type, le16_to_cpu(trailer->dir_type)); 2371 return -EINVAL; 2372 } 2373 if (le16_to_cpu(trailer->trailer_length) < 2374 sizeof(struct bnxt_ucode_trailer)) { 2375 netdev_err(dev, "Invalid microcode trailer length: %d\n", 2376 le16_to_cpu(trailer->trailer_length)); 2377 return -EINVAL; 2378 } 2379 2380 /* Confirm the CRC32 checksum of the file: */ 2381 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 2382 sizeof(stored_crc))); 2383 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 2384 if (calculated_crc != stored_crc) { 2385 netdev_err(dev, 2386 "CRC32 (%08lX) does not match calculated: %08lX\n", 2387 (unsigned long)stored_crc, 2388 (unsigned long)calculated_crc); 2389 return -EINVAL; 2390 } 2391 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2392 0, 0, 0, fw_data, fw_size); 2393 2394 return rc; 2395 } 2396 2397 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) 2398 { 2399 switch (dir_type) { 2400 case BNX_DIR_TYPE_CHIMP_PATCH: 2401 case BNX_DIR_TYPE_BOOTCODE: 2402 case BNX_DIR_TYPE_BOOTCODE_2: 2403 case BNX_DIR_TYPE_APE_FW: 2404 case BNX_DIR_TYPE_APE_PATCH: 2405 case BNX_DIR_TYPE_KONG_FW: 2406 case BNX_DIR_TYPE_KONG_PATCH: 2407 case BNX_DIR_TYPE_BONO_FW: 2408 case BNX_DIR_TYPE_BONO_PATCH: 2409 return true; 2410 } 2411 2412 return false; 2413 } 2414 2415 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type) 2416 { 2417 switch (dir_type) { 2418 case BNX_DIR_TYPE_AVS: 2419 case BNX_DIR_TYPE_EXP_ROM_MBA: 2420 case BNX_DIR_TYPE_PCIE: 2421 case BNX_DIR_TYPE_TSCF_UCODE: 2422 case BNX_DIR_TYPE_EXT_PHY: 2423 case BNX_DIR_TYPE_CCM: 2424 case BNX_DIR_TYPE_ISCSI_BOOT: 2425 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 2426 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 2427 return true; 2428 } 2429 2430 return false; 2431 } 2432 2433 static bool bnxt_dir_type_is_executable(u16 dir_type) 2434 { 2435 return bnxt_dir_type_is_ape_bin_format(dir_type) || 2436 bnxt_dir_type_is_other_exec_format(dir_type); 2437 } 2438 2439 static int bnxt_flash_firmware_from_file(struct net_device *dev, 2440 u16 dir_type, 2441 const char *filename) 2442 { 2443 const struct firmware *fw; 2444 int rc; 2445 2446 rc = 
request_firmware(&fw, filename, &dev->dev); 2447 if (rc != 0) { 2448 netdev_err(dev, "Error %d requesting firmware file: %s\n", 2449 rc, filename); 2450 return rc; 2451 } 2452 if (bnxt_dir_type_is_ape_bin_format(dir_type)) 2453 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); 2454 else if (bnxt_dir_type_is_other_exec_format(dir_type)) 2455 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); 2456 else 2457 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2458 0, 0, 0, fw->data, fw->size); 2459 release_firmware(fw); 2460 return rc; 2461 } 2462 2463 #define BNXT_PKG_DMA_SIZE 0x40000 2464 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) 2465 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) 2466 2467 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, 2468 u32 install_type) 2469 { 2470 struct hwrm_nvm_install_update_input *install; 2471 struct hwrm_nvm_install_update_output *resp; 2472 struct hwrm_nvm_modify_input *modify; 2473 struct bnxt *bp = netdev_priv(dev); 2474 bool defrag_attempted = false; 2475 dma_addr_t dma_handle; 2476 u8 *kmem = NULL; 2477 u32 modify_len; 2478 u32 item_len; 2479 u16 index; 2480 int rc; 2481 2482 bnxt_hwrm_fw_set_time(bp); 2483 2484 rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); 2485 if (rc) 2486 return rc; 2487 2488 /* Try allocating a large DMA buffer first. Older fw will 2489 * cause excessive NVRAM erases when using small blocks. 2490 */ 2491 modify_len = roundup_pow_of_two(fw->size); 2492 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); 2493 while (1) { 2494 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle); 2495 if (!kmem && modify_len > PAGE_SIZE) 2496 modify_len /= 2; 2497 else 2498 break; 2499 } 2500 if (!kmem) { 2501 hwrm_req_drop(bp, modify); 2502 return -ENOMEM; 2503 } 2504 2505 rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); 2506 if (rc) { 2507 hwrm_req_drop(bp, modify); 2508 return rc; 2509 } 2510 2511 hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT); 2512 hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT); 2513 2514 hwrm_req_hold(bp, modify); 2515 modify->host_src_addr = cpu_to_le64(dma_handle); 2516 2517 resp = hwrm_req_hold(bp, install); 2518 if ((install_type & 0xffff) == 0) 2519 install_type >>= 16; 2520 install->install_type = cpu_to_le32(install_type); 2521 2522 do { 2523 u32 copied = 0, len = modify_len; 2524 2525 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2526 BNX_DIR_ORDINAL_FIRST, 2527 BNX_DIR_EXT_NONE, 2528 &index, &item_len, NULL); 2529 if (rc) { 2530 netdev_err(dev, "PKG update area not created in nvram\n"); 2531 break; 2532 } 2533 if (fw->size > item_len) { 2534 netdev_err(dev, "PKG insufficient update area in nvram: %lu\n", 2535 (unsigned long)fw->size); 2536 rc = -EFBIG; 2537 break; 2538 } 2539 2540 modify->dir_idx = cpu_to_le16(index); 2541 2542 if (fw->size > modify_len) 2543 modify->flags = BNXT_NVM_MORE_FLAG; 2544 while (copied < fw->size) { 2545 u32 balance = fw->size - copied; 2546 2547 if (balance <= modify_len) { 2548 len = balance; 2549 if (copied) 2550 modify->flags |= BNXT_NVM_LAST_FLAG; 2551 } 2552 memcpy(kmem, fw->data + copied, len); 2553 modify->len = cpu_to_le32(len); 2554 modify->offset = cpu_to_le32(copied); 2555 rc = hwrm_req_send(bp, modify); 2556 if (rc) 2557 goto pkg_abort; 2558 copied += len; 2559 } 2560 2561 rc = hwrm_req_send_silent(bp, install); 2562 2563 if (defrag_attempted) { 2564 /* We have tried to defragment already in the previous 2565 * 
iteration. Return with the result for INSTALL_UPDATE 2566 */ 2567 break; 2568 } 2569 2570 if (rc && ((struct hwrm_err_output *)resp)->cmd_err == 2571 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { 2572 install->flags = 2573 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); 2574 2575 rc = hwrm_req_send_silent(bp, install); 2576 2577 if (rc && ((struct hwrm_err_output *)resp)->cmd_err == 2578 NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { 2579 /* FW has cleared NVM area, driver will create 2580 * UPDATE directory and try the flash again 2581 */ 2582 defrag_attempted = true; 2583 install->flags = 0; 2584 rc = bnxt_flash_nvram(bp->dev, 2585 BNX_DIR_TYPE_UPDATE, 2586 BNX_DIR_ORDINAL_FIRST, 2587 0, 0, item_len, NULL, 0); 2588 } else if (rc) { 2589 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc: %x\n", rc); 2590 } 2591 } else if (rc) { 2592 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc: %x\n", rc); 2593 } 2594 } while (defrag_attempted && !rc); 2595 2596 pkg_abort: 2597 hwrm_req_drop(bp, modify); 2598 hwrm_req_drop(bp, install); 2599 2600 if (resp->result) { 2601 netdev_err(dev, "PKG install error = %d, problem_item = %d\n", 2602 (s8)resp->result, (int)resp->problem_item); 2603 rc = -ENOPKG; 2604 } 2605 if (rc == -EACCES) 2606 bnxt_print_admin_err(bp); 2607 return rc; 2608 } 2609 2610 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, 2611 u32 install_type) 2612 { 2613 const struct firmware *fw; 2614 int rc; 2615 2616 rc = request_firmware(&fw, filename, &dev->dev); 2617 if (rc != 0) { 2618 netdev_err(dev, "PKG error %d requesting file: %s\n", 2619 rc, filename); 2620 return rc; 2621 } 2622 2623 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type); 2624 2625 release_firmware(fw); 2626 2627 return rc; 2628 } 2629 2630 static int bnxt_flash_device(struct net_device *dev, 2631 struct ethtool_flash *flash) 2632 { 2633 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) { 2634 netdev_err(dev, "flashdev not supported from a virtual function\n"); 2635 return -EINVAL; 2636 } 2637 2638 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS || 2639 flash->region > 0xffff) 2640 return bnxt_flash_package_from_file(dev, flash->data, 2641 flash->region); 2642 2643 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); 2644 } 2645 2646 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) 2647 { 2648 struct hwrm_nvm_get_dir_info_output *output; 2649 struct hwrm_nvm_get_dir_info_input *req; 2650 struct bnxt *bp = netdev_priv(dev); 2651 int rc; 2652 2653 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO); 2654 if (rc) 2655 return rc; 2656 2657 output = hwrm_req_hold(bp, req); 2658 rc = hwrm_req_send(bp, req); 2659 if (!rc) { 2660 *entries = le32_to_cpu(output->entries); 2661 *length = le32_to_cpu(output->entry_length); 2662 } 2663 hwrm_req_drop(bp, req); 2664 return rc; 2665 } 2666 2667 static int bnxt_get_eeprom_len(struct net_device *dev) 2668 { 2669 struct bnxt *bp = netdev_priv(dev); 2670 2671 if (BNXT_VF(bp)) 2672 return 0; 2673 2674 /* The -1 return value allows the entire 32-bit range of offsets to be 2675 * passed via the ethtool command-line utility.
2676 */ 2677 return -1; 2678 } 2679 2680 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) 2681 { 2682 struct bnxt *bp = netdev_priv(dev); 2683 int rc; 2684 u32 dir_entries; 2685 u32 entry_length; 2686 u8 *buf; 2687 size_t buflen; 2688 dma_addr_t dma_handle; 2689 struct hwrm_nvm_get_dir_entries_input *req; 2690 2691 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); 2692 if (rc != 0) 2693 return rc; 2694 2695 if (!dir_entries || !entry_length) 2696 return -EIO; 2697 2698 /* Insert 2 bytes of directory info (count and size of entries) */ 2699 if (len < 2) 2700 return -EINVAL; 2701 2702 *data++ = dir_entries; 2703 *data++ = entry_length; 2704 len -= 2; 2705 memset(data, 0xff, len); 2706 2707 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); 2708 if (rc) 2709 return rc; 2710 2711 buflen = dir_entries * entry_length; 2712 buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); 2713 if (!buf) { 2714 hwrm_req_drop(bp, req); 2715 return -ENOMEM; 2716 } 2717 req->host_dest_addr = cpu_to_le64(dma_handle); 2718 2719 hwrm_req_hold(bp, req); /* hold the slice */ 2720 rc = hwrm_req_send(bp, req); 2721 if (rc == 0) 2722 memcpy(data, buf, len > buflen ? buflen : len); 2723 hwrm_req_drop(bp, req); 2724 return rc; 2725 } 2726 2727 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, 2728 u32 length, u8 *data) 2729 { 2730 struct bnxt *bp = netdev_priv(dev); 2731 int rc; 2732 u8 *buf; 2733 dma_addr_t dma_handle; 2734 struct hwrm_nvm_read_input *req; 2735 2736 if (!length) 2737 return -EINVAL; 2738 2739 rc = hwrm_req_init(bp, req, HWRM_NVM_READ); 2740 if (rc) 2741 return rc; 2742 2743 buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); 2744 if (!buf) { 2745 hwrm_req_drop(bp, req); 2746 return -ENOMEM; 2747 } 2748 2749 req->host_dest_addr = cpu_to_le64(dma_handle); 2750 req->dir_idx = cpu_to_le16(index); 2751 req->offset = cpu_to_le32(offset); 2752 req->len = cpu_to_le32(length); 2753 2754 hwrm_req_hold(bp, req); /* hold the slice */ 2755 rc = hwrm_req_send(bp, req); 2756 if (rc == 0) 2757 memcpy(data, buf, length); 2758 hwrm_req_drop(bp, req); 2759 return rc; 2760 } 2761 2762 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 2763 u16 ext, u16 *index, u32 *item_length, 2764 u32 *data_length) 2765 { 2766 struct hwrm_nvm_find_dir_entry_output *output; 2767 struct hwrm_nvm_find_dir_entry_input *req; 2768 struct bnxt *bp = netdev_priv(dev); 2769 int rc; 2770 2771 rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); 2772 if (rc) 2773 return rc; 2774 2775 req->enables = 0; 2776 req->dir_idx = 0; 2777 req->dir_type = cpu_to_le16(type); 2778 req->dir_ordinal = cpu_to_le16(ordinal); 2779 req->dir_ext = cpu_to_le16(ext); 2780 req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 2781 output = hwrm_req_hold(bp, req); 2782 rc = hwrm_req_send_silent(bp, req); 2783 if (rc == 0) { 2784 if (index) 2785 *index = le16_to_cpu(output->dir_idx); 2786 if (item_length) 2787 *item_length = le32_to_cpu(output->dir_item_length); 2788 if (data_length) 2789 *data_length = le32_to_cpu(output->dir_data_length); 2790 } 2791 hwrm_req_drop(bp, req); 2792 return rc; 2793 } 2794 2795 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) 2796 { 2797 char *retval = NULL; 2798 char *p; 2799 char *value; 2800 int field = 0; 2801 2802 if (datalen < 1) 2803 return NULL; 2804 /* null-terminate the log data (removing last '\n'): */ 2805 data[datalen - 1] = 0; 2806 for (p = data; *p != 0; p++) { 2807 field = 0; 2808 retval = NULL; 
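/* Scan one '\n'-terminated line of tab-separated fields below, splitting the fields in place (each '\t' becomes a NUL) and remembering the start of the field at index desired_field. */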
2809 while (*p != 0 && *p != '\n') { 2810 value = p; 2811 while (*p != 0 && *p != '\t' && *p != '\n') 2812 p++; 2813 if (field == desired_field) 2814 retval = value; 2815 if (*p != '\t') 2816 break; 2817 *p = 0; 2818 field++; 2819 p++; 2820 } 2821 if (*p == 0) 2822 break; 2823 *p = 0; 2824 } 2825 return retval; 2826 } 2827 2828 static void bnxt_get_pkgver(struct net_device *dev) 2829 { 2830 struct bnxt *bp = netdev_priv(dev); 2831 u16 index = 0; 2832 char *pkgver; 2833 u32 pkglen; 2834 u8 *pkgbuf; 2835 int len; 2836 2837 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 2838 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2839 &index, NULL, &pkglen) != 0) 2840 return; 2841 2842 pkgbuf = kzalloc(pkglen, GFP_KERNEL); 2843 if (!pkgbuf) { 2844 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", 2845 pkglen); 2846 return; 2847 } 2848 2849 if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) 2850 goto err; 2851 2852 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, 2853 pkglen); 2854 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { 2855 len = strlen(bp->fw_ver_str); 2856 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, 2857 "/pkg %s", pkgver); 2858 } 2859 err: 2860 kfree(pkgbuf); 2861 } 2862 2863 static int bnxt_get_eeprom(struct net_device *dev, 2864 struct ethtool_eeprom *eeprom, 2865 u8 *data) 2866 { 2867 u32 index; 2868 u32 offset; 2869 2870 if (eeprom->offset == 0) /* special offset value to get directory */ 2871 return bnxt_get_nvram_directory(dev, eeprom->len, data); 2872 2873 index = eeprom->offset >> 24; 2874 offset = eeprom->offset & 0xffffff; 2875 2876 if (index == 0) { 2877 netdev_err(dev, "unsupported index value: %d\n", index); 2878 return -EINVAL; 2879 } 2880 2881 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); 2882 } 2883 2884 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) 2885 { 2886 struct hwrm_nvm_erase_dir_entry_input *req; 2887 struct bnxt *bp = netdev_priv(dev); 2888 int rc; 2889 2890 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); 2891 if (rc) 2892 return rc; 2893 2894 req->dir_idx = cpu_to_le16(index); 2895 return hwrm_req_send(bp, req); 2896 } 2897 2898 static int bnxt_set_eeprom(struct net_device *dev, 2899 struct ethtool_eeprom *eeprom, 2900 u8 *data) 2901 { 2902 struct bnxt *bp = netdev_priv(dev); 2903 u8 index, dir_op; 2904 u16 type, ext, ordinal, attr; 2905 2906 if (!BNXT_PF(bp)) { 2907 netdev_err(dev, "NVM write not supported from a virtual function\n"); 2908 return -EINVAL; 2909 } 2910 2911 type = eeprom->magic >> 16; 2912 2913 if (type == 0xffff) { /* special value for directory operations */ 2914 index = eeprom->magic & 0xff; 2915 dir_op = eeprom->magic >> 8; 2916 if (index == 0) 2917 return -EINVAL; 2918 switch (dir_op) { 2919 case 0x0e: /* erase */ 2920 if (eeprom->offset != ~eeprom->magic) 2921 return -EINVAL; 2922 return bnxt_erase_nvram_directory(dev, index - 1); 2923 default: 2924 return -EINVAL; 2925 } 2926 } 2927 2928 /* Create or re-write an NVM item: */ 2929 if (bnxt_dir_type_is_executable(type)) 2930 return -EOPNOTSUPP; 2931 ext = eeprom->magic & 0xffff; 2932 ordinal = eeprom->offset >> 16; 2933 attr = eeprom->offset & 0xffff; 2934 2935 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data, 2936 eeprom->len); 2937 } 2938 2939 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) 2940 { 2941 struct bnxt *bp = netdev_priv(dev); 2942 struct ethtool_eee *eee = &bp->eee; 2943 struct bnxt_link_info *link_info = 
&bp->link_info; 2944 u32 advertising; 2945 int rc = 0; 2946 2947 if (!BNXT_PHY_CFG_ABLE(bp)) 2948 return -EOPNOTSUPP; 2949 2950 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 2951 return -EOPNOTSUPP; 2952 2953 mutex_lock(&bp->link_lock); 2954 advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 2955 if (!edata->eee_enabled) 2956 goto eee_ok; 2957 2958 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 2959 netdev_warn(dev, "EEE requires autoneg\n"); 2960 rc = -EINVAL; 2961 goto eee_exit; 2962 } 2963 if (edata->tx_lpi_enabled) { 2964 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || 2965 edata->tx_lpi_timer < bp->lpi_tmr_lo)) { 2966 netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n", 2967 bp->lpi_tmr_lo, bp->lpi_tmr_hi); 2968 rc = -EINVAL; 2969 goto eee_exit; 2970 } else if (!bp->lpi_tmr_hi) { 2971 edata->tx_lpi_timer = eee->tx_lpi_timer; 2972 } 2973 } 2974 if (!edata->advertised) { 2975 edata->advertised = advertising & eee->supported; 2976 } else if (edata->advertised & ~advertising) { 2977 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", 2978 edata->advertised, advertising); 2979 rc = -EINVAL; 2980 goto eee_exit; 2981 } 2982 2983 eee->advertised = edata->advertised; 2984 eee->tx_lpi_enabled = edata->tx_lpi_enabled; 2985 eee->tx_lpi_timer = edata->tx_lpi_timer; 2986 eee_ok: 2987 eee->eee_enabled = edata->eee_enabled; 2988 2989 if (netif_running(dev)) 2990 rc = bnxt_hwrm_set_link_setting(bp, false, true); 2991 2992 eee_exit: 2993 mutex_unlock(&bp->link_lock); 2994 return rc; 2995 } 2996 2997 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) 2998 { 2999 struct bnxt *bp = netdev_priv(dev); 3000 3001 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 3002 return -EOPNOTSUPP; 3003 3004 *edata = bp->eee; 3005 if (!bp->eee.eee_enabled) { 3006 /* Preserve tx_lpi_timer so that the last value will be used 3007 * by default when it is re-enabled. 3008 */ 3009 edata->advertised = 0; 3010 edata->tx_lpi_enabled = 0; 3011 } 3012 3013 if (!bp->eee.eee_active) 3014 edata->lp_advertised = 0; 3015 3016 return 0; 3017 } 3018 3019 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, 3020 u16 page_number, u16 start_addr, 3021 u16 data_length, u8 *buf) 3022 { 3023 struct hwrm_port_phy_i2c_read_output *output; 3024 struct hwrm_port_phy_i2c_read_input *req; 3025 int rc, byte_offset = 0; 3026 3027 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); 3028 if (rc) 3029 return rc; 3030 3031 output = hwrm_req_hold(bp, req); 3032 req->i2c_slave_addr = i2c_addr; 3033 req->page_number = cpu_to_le16(page_number); 3034 req->port_id = cpu_to_le16(bp->pf.port_id); 3035 do { 3036 u16 xfer_size; 3037 3038 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); 3039 data_length -= xfer_size; 3040 req->page_offset = cpu_to_le16(start_addr + byte_offset); 3041 req->data_length = xfer_size; 3042 req->enables = cpu_to_le32(start_addr + byte_offset ?
3043 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); 3044 rc = hwrm_req_send(bp, req); 3045 if (!rc) 3046 memcpy(buf + byte_offset, output->data, xfer_size); 3047 byte_offset += xfer_size; 3048 } while (!rc && data_length > 0); 3049 hwrm_req_drop(bp, req); 3050 3051 return rc; 3052 } 3053 3054 static int bnxt_get_module_info(struct net_device *dev, 3055 struct ethtool_modinfo *modinfo) 3056 { 3057 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; 3058 struct bnxt *bp = netdev_priv(dev); 3059 int rc; 3060 3061 /* No point in going further if phy status indicates 3062 * module is not inserted or if it is powered down or 3063 * if it is of type 10GBase-T 3064 */ 3065 if (bp->link_info.module_status > 3066 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 3067 return -EOPNOTSUPP; 3068 3069 /* This feature is not supported in older firmware versions */ 3070 if (bp->hwrm_spec_code < 0x10202) 3071 return -EOPNOTSUPP; 3072 3073 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3074 SFF_DIAG_SUPPORT_OFFSET + 1, 3075 data); 3076 if (!rc) { 3077 u8 module_id = data[0]; 3078 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; 3079 3080 switch (module_id) { 3081 case SFF_MODULE_ID_SFP: 3082 modinfo->type = ETH_MODULE_SFF_8472; 3083 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 3084 if (!diag_supported) 3085 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 3086 break; 3087 case SFF_MODULE_ID_QSFP: 3088 case SFF_MODULE_ID_QSFP_PLUS: 3089 modinfo->type = ETH_MODULE_SFF_8436; 3090 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 3091 break; 3092 case SFF_MODULE_ID_QSFP28: 3093 modinfo->type = ETH_MODULE_SFF_8636; 3094 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 3095 break; 3096 default: 3097 rc = -EOPNOTSUPP; 3098 break; 3099 } 3100 } 3101 return rc; 3102 } 3103 3104 static int bnxt_get_module_eeprom(struct net_device *dev, 3105 struct ethtool_eeprom *eeprom, 3106 u8 *data) 3107 { 3108 struct bnxt *bp = netdev_priv(dev); 3109 u16 start = eeprom->offset, length = eeprom->len; 3110 int rc = 0; 3111 3112 memset(data, 0, eeprom->len); 3113 3114 /* Read A0 portion of the EEPROM */ 3115 if (start < ETH_MODULE_SFF_8436_LEN) { 3116 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) 3117 length = ETH_MODULE_SFF_8436_LEN - start; 3118 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3119 start, length, data); 3120 if (rc) 3121 return rc; 3122 start += length; 3123 data += length; 3124 length = eeprom->len - length; 3125 } 3126 3127 /* Read A2 portion of the EEPROM */ 3128 if (length) { 3129 start -= ETH_MODULE_SFF_8436_LEN; 3130 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 3131 start, length, data); 3132 } 3133 return rc; 3134 } 3135 3136 static int bnxt_nway_reset(struct net_device *dev) 3137 { 3138 int rc = 0; 3139 3140 struct bnxt *bp = netdev_priv(dev); 3141 struct bnxt_link_info *link_info = &bp->link_info; 3142 3143 if (!BNXT_PHY_CFG_ABLE(bp)) 3144 return -EOPNOTSUPP; 3145 3146 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) 3147 return -EINVAL; 3148 3149 if (netif_running(dev)) 3150 rc = bnxt_hwrm_set_link_setting(bp, true, false); 3151 3152 return rc; 3153 } 3154 3155 static int bnxt_set_phys_id(struct net_device *dev, 3156 enum ethtool_phys_id_state state) 3157 { 3158 struct hwrm_port_led_cfg_input *req; 3159 struct bnxt *bp = netdev_priv(dev); 3160 struct bnxt_pf_info *pf = &bp->pf; 3161 struct bnxt_led_cfg *led_cfg; 3162 u8 led_state; 3163 __le16 duration; 3164 int rc, i; 3165 3166 if (!bp->num_leds || BNXT_VF(bp)) 3167 return -EOPNOTSUPP; 3168 3169 if (state == 
ETHTOOL_ID_ACTIVE) { 3170 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; 3171 duration = cpu_to_le16(500); 3172 } else if (state == ETHTOOL_ID_INACTIVE) { 3173 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; 3174 duration = cpu_to_le16(0); 3175 } else { 3176 return -EINVAL; 3177 } 3178 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); 3179 if (rc) 3180 return rc; 3181 3182 req->port_id = cpu_to_le16(pf->port_id); 3183 req->num_leds = bp->num_leds; 3184 led_cfg = (struct bnxt_led_cfg *)&req->led0_id; 3185 for (i = 0; i < bp->num_leds; i++, led_cfg++) { 3186 req->enables |= BNXT_LED_DFLT_ENABLES(i); 3187 led_cfg->led_id = bp->leds[i].led_id; 3188 led_cfg->led_state = led_state; 3189 led_cfg->led_blink_on = duration; 3190 led_cfg->led_blink_off = duration; 3191 led_cfg->led_group_id = bp->leds[i].led_group_id; 3192 } 3193 return hwrm_req_send(bp, req); 3194 } 3195 3196 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) 3197 { 3198 struct hwrm_selftest_irq_input *req; 3199 int rc; 3200 3201 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); 3202 if (rc) 3203 return rc; 3204 3205 req->cmpl_ring = cpu_to_le16(cmpl_ring); 3206 return hwrm_req_send(bp, req); 3207 } 3208 3209 static int bnxt_test_irq(struct bnxt *bp) 3210 { 3211 int i; 3212 3213 for (i = 0; i < bp->cp_nr_rings; i++) { 3214 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id; 3215 int rc; 3216 3217 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring); 3218 if (rc) 3219 return rc; 3220 } 3221 return 0; 3222 } 3223 3224 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) 3225 { 3226 struct hwrm_port_mac_cfg_input *req; 3227 int rc; 3228 3229 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); 3230 if (rc) 3231 return rc; 3232 3233 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); 3234 if (enable) 3235 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; 3236 else 3237 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; 3238 return hwrm_req_send(bp, req); 3239 } 3240 3241 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) 3242 { 3243 struct hwrm_port_phy_qcaps_output *resp; 3244 struct hwrm_port_phy_qcaps_input *req; 3245 int rc; 3246 3247 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 3248 if (rc) 3249 return rc; 3250 3251 resp = hwrm_req_hold(bp, req); 3252 rc = hwrm_req_send(bp, req); 3253 if (!rc) 3254 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); 3255 3256 hwrm_req_drop(bp, req); 3257 return rc; 3258 } 3259 3260 static int bnxt_disable_an_for_lpbk(struct bnxt *bp, 3261 struct hwrm_port_phy_cfg_input *req) 3262 { 3263 struct bnxt_link_info *link_info = &bp->link_info; 3264 u16 fw_advertising; 3265 u16 fw_speed; 3266 int rc; 3267 3268 if (!link_info->autoneg || 3269 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) 3270 return 0; 3271 3272 rc = bnxt_query_force_speeds(bp, &fw_advertising); 3273 if (rc) 3274 return rc; 3275 3276 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 3277 if (bp->link_info.link_up) 3278 fw_speed = bp->link_info.link_speed; 3279 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) 3280 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 3281 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB) 3282 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 3283 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB) 3284 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 3285 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB) 3286 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 3287 3288 req->force_link_speed = cpu_to_le16(fw_speed); 3289 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE 
| 3290 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 3291 rc = hwrm_req_send(bp, req); 3292 req->flags = 0; 3293 req->force_link_speed = cpu_to_le16(0); 3294 return rc; 3295 } 3296 3297 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) 3298 { 3299 struct hwrm_port_phy_cfg_input *req; 3300 int rc; 3301 3302 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 3303 if (rc) 3304 return rc; 3305 3306 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ 3307 hwrm_req_hold(bp, req); 3308 3309 if (enable) { 3310 bnxt_disable_an_for_lpbk(bp, req); 3311 if (ext) 3312 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; 3313 else 3314 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; 3315 } else { 3316 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; 3317 } 3318 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); 3319 rc = hwrm_req_send(bp, req); 3320 hwrm_req_drop(bp, req); 3321 return rc; 3322 } 3323 3324 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3325 u32 raw_cons, int pkt_size) 3326 { 3327 struct bnxt_napi *bnapi = cpr->bnapi; 3328 struct bnxt_rx_ring_info *rxr; 3329 struct bnxt_sw_rx_bd *rx_buf; 3330 struct rx_cmp *rxcmp; 3331 u16 cp_cons, cons; 3332 u8 *data; 3333 u32 len; 3334 int i; 3335 3336 rxr = bnapi->rx_ring; 3337 cp_cons = RING_CMP(raw_cons); 3338 rxcmp = (struct rx_cmp *) 3339 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3340 cons = rxcmp->rx_cmp_opaque; 3341 rx_buf = &rxr->rx_buf_ring[cons]; 3342 data = rx_buf->data_ptr; 3343 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 3344 if (len != pkt_size) 3345 return -EIO; 3346 i = ETH_ALEN; 3347 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr)) 3348 return -EIO; 3349 i += ETH_ALEN; 3350 for ( ; i < pkt_size; i++) { 3351 if (data[i] != (u8)(i & 0xff)) 3352 return -EIO; 3353 } 3354 return 0; 3355 } 3356 3357 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3358 int pkt_size) 3359 { 3360 struct tx_cmp *txcmp; 3361 int rc = -EIO; 3362 u32 raw_cons; 3363 u32 cons; 3364 int i; 3365 3366 raw_cons = cpr->cp_raw_cons; 3367 for (i = 0; i < 200; i++) { 3368 cons = RING_CMP(raw_cons); 3369 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3370 3371 if (!TX_CMP_VALID(txcmp, raw_cons)) { 3372 udelay(5); 3373 continue; 3374 } 3375 3376 /* The valid test of the entry must be done first before 3377 * reading any further. 
3378 */ 3379 dma_rmb(); 3380 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) { 3381 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); 3382 raw_cons = NEXT_RAW_CMP(raw_cons); 3383 raw_cons = NEXT_RAW_CMP(raw_cons); 3384 break; 3385 } 3386 raw_cons = NEXT_RAW_CMP(raw_cons); 3387 } 3388 cpr->cp_raw_cons = raw_cons; 3389 return rc; 3390 } 3391 3392 static int bnxt_run_loopback(struct bnxt *bp) 3393 { 3394 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; 3395 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 3396 struct bnxt_cp_ring_info *cpr; 3397 int pkt_size, i = 0; 3398 struct sk_buff *skb; 3399 dma_addr_t map; 3400 u8 *data; 3401 int rc; 3402 3403 cpr = &rxr->bnapi->cp_ring; 3404 if (bp->flags & BNXT_FLAG_CHIP_P5) 3405 cpr = cpr->cp_ring_arr[BNXT_RX_HDL]; 3406 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); 3407 skb = netdev_alloc_skb(bp->dev, pkt_size); 3408 if (!skb) 3409 return -ENOMEM; 3410 data = skb_put(skb, pkt_size); 3411 eth_broadcast_addr(data); 3412 i += ETH_ALEN; 3413 ether_addr_copy(&data[i], bp->dev->dev_addr); 3414 i += ETH_ALEN; 3415 for ( ; i < pkt_size; i++) 3416 data[i] = (u8)(i & 0xff); 3417 3418 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, 3419 DMA_TO_DEVICE); 3420 if (dma_mapping_error(&bp->pdev->dev, map)) { 3421 dev_kfree_skb(skb); 3422 return -EIO; 3423 } 3424 bnxt_xmit_bd(bp, txr, map, pkt_size); 3425 3426 /* Sync BD data before updating doorbell */ 3427 wmb(); 3428 3429 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); 3430 rc = bnxt_poll_loopback(bp, cpr, pkt_size); 3431 3432 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); 3433 dev_kfree_skb(skb); 3434 return rc; 3435 } 3436 3437 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) 3438 { 3439 struct hwrm_selftest_exec_output *resp; 3440 struct hwrm_selftest_exec_input *req; 3441 int rc; 3442 3443 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC); 3444 if (rc) 3445 return rc; 3446 3447 hwrm_req_timeout(bp, req, bp->test_info->timeout); 3448 req->flags = test_mask; 3449 3450 resp = hwrm_req_hold(bp, req); 3451 rc = hwrm_req_send(bp, req); 3452 *test_results = resp->test_success; 3453 hwrm_req_drop(bp, req); 3454 return rc; 3455 } 3456 3457 #define BNXT_DRV_TESTS 4 3458 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) 3459 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) 3460 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) 3461 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) 3462 3463 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, 3464 u64 *buf) 3465 { 3466 struct bnxt *bp = netdev_priv(dev); 3467 bool do_ext_lpbk = false; 3468 bool offline = false; 3469 u8 test_results = 0; 3470 u8 test_mask = 0; 3471 int rc = 0, i; 3472 3473 if (!bp->num_tests || !BNXT_PF(bp)) 3474 return; 3475 memset(buf, 0, sizeof(u64) * bp->num_tests); 3476 if (!netif_running(dev)) { 3477 etest->flags |= ETH_TEST_FL_FAILED; 3478 return; 3479 } 3480 3481 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && 3482 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) 3483 do_ext_lpbk = true; 3484 3485 if (etest->flags & ETH_TEST_FL_OFFLINE) { 3486 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) { 3487 etest->flags |= ETH_TEST_FL_FAILED; 3488 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n"); 3489 return; 3490 } 3491 offline = true; 3492 } 3493 3494 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 3495 u8 bit_val = 1 << i; 3496 3497 if (!(bp->test_info->offline_mask & bit_val)) 3498 test_mask |= 
bit_val; 3499 else if (offline) 3500 test_mask |= bit_val; 3501 } 3502 if (!offline) { 3503 bnxt_run_fw_tests(bp, test_mask, &test_results); 3504 } else { 3505 rc = bnxt_close_nic(bp, false, false); 3506 if (rc) 3507 return; 3508 bnxt_run_fw_tests(bp, test_mask, &test_results); 3509 3510 buf[BNXT_MACLPBK_TEST_IDX] = 1; 3511 bnxt_hwrm_mac_loopback(bp, true); 3512 msleep(250); 3513 rc = bnxt_half_open_nic(bp); 3514 if (rc) { 3515 bnxt_hwrm_mac_loopback(bp, false); 3516 etest->flags |= ETH_TEST_FL_FAILED; 3517 return; 3518 } 3519 if (bnxt_run_loopback(bp)) 3520 etest->flags |= ETH_TEST_FL_FAILED; 3521 else 3522 buf[BNXT_MACLPBK_TEST_IDX] = 0; 3523 3524 bnxt_hwrm_mac_loopback(bp, false); 3525 bnxt_hwrm_phy_loopback(bp, true, false); 3526 msleep(1000); 3527 if (bnxt_run_loopback(bp)) { 3528 buf[BNXT_PHYLPBK_TEST_IDX] = 1; 3529 etest->flags |= ETH_TEST_FL_FAILED; 3530 } 3531 if (do_ext_lpbk) { 3532 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 3533 bnxt_hwrm_phy_loopback(bp, true, true); 3534 msleep(1000); 3535 if (bnxt_run_loopback(bp)) { 3536 buf[BNXT_EXTLPBK_TEST_IDX] = 1; 3537 etest->flags |= ETH_TEST_FL_FAILED; 3538 } 3539 } 3540 bnxt_hwrm_phy_loopback(bp, false, false); 3541 bnxt_half_close_nic(bp); 3542 rc = bnxt_open_nic(bp, false, true); 3543 } 3544 if (rc || bnxt_test_irq(bp)) { 3545 buf[BNXT_IRQ_TEST_IDX] = 1; 3546 etest->flags |= ETH_TEST_FL_FAILED; 3547 } 3548 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 3549 u8 bit_val = 1 << i; 3550 3551 if ((test_mask & bit_val) && !(test_results & bit_val)) { 3552 buf[i] = 1; 3553 etest->flags |= ETH_TEST_FL_FAILED; 3554 } 3555 } 3556 } 3557 3558 static int bnxt_reset(struct net_device *dev, u32 *flags) 3559 { 3560 struct bnxt *bp = netdev_priv(dev); 3561 bool reload = false; 3562 u32 req = *flags; 3563 3564 if (!req) 3565 return -EINVAL; 3566 3567 if (!BNXT_PF(bp)) { 3568 netdev_err(dev, "Reset is not supported from a VF\n"); 3569 return -EOPNOTSUPP; 3570 } 3571 3572 if (pci_vfs_assigned(bp->pdev) && 3573 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { 3574 netdev_err(dev, 3575 "Reset not allowed when VFs are assigned to VMs\n"); 3576 return -EBUSY; 3577 } 3578 3579 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { 3580 /* This feature is not supported in older firmware versions */ 3581 if (bp->hwrm_spec_code >= 0x10803) { 3582 if (!bnxt_firmware_reset_chip(dev)) { 3583 netdev_info(dev, "Firmware reset request successful.\n"); 3584 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) 3585 reload = true; 3586 *flags &= ~BNXT_FW_RESET_CHIP; 3587 } 3588 } else if (req == BNXT_FW_RESET_CHIP) { 3589 return -EOPNOTSUPP; /* only request, fail hard */ 3590 } 3591 } 3592 3593 if (req & BNXT_FW_RESET_AP) { 3594 /* This feature is not supported in older firmware versions */ 3595 if (bp->hwrm_spec_code >= 0x10803) { 3596 if (!bnxt_firmware_reset_ap(dev)) { 3597 netdev_info(dev, "Reset application processor successful.\n"); 3598 reload = true; 3599 *flags &= ~BNXT_FW_RESET_AP; 3600 } 3601 } else if (req == BNXT_FW_RESET_AP) { 3602 return -EOPNOTSUPP; /* only request, fail hard */ 3603 } 3604 } 3605 3606 if (reload) 3607 netdev_info(dev, "Reload driver to complete reset\n"); 3608 3609 return 0; 3610 } 3611 3612 static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, 3613 struct bnxt_hwrm_dbg_dma_info *info) 3614 { 3615 struct hwrm_dbg_cmn_input *cmn_req = msg; 3616 __le16 *seq_ptr = msg + info->seq_off; 3617 struct hwrm_dbg_cmn_output *cmn_resp; 3618 u16 seq = 0, len, segs_off; 3619 dma_addr_t dma_handle; 3620 void *dma_buf, *resp; 3621 int rc, off = 0; 
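/* Pull the debug data across in chunks: each HWRM request below fills the DMA bounce buffer with one chunk, and the sequence number advances until the firmware stops setting HWRM_DBG_CMN_FLAGS_MORE. */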
3622 3623 dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); 3624 if (!dma_buf) { 3625 hwrm_req_drop(bp, msg); 3626 return -ENOMEM; 3627 } 3628 3629 hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); 3630 cmn_resp = hwrm_req_hold(bp, msg); 3631 resp = cmn_resp; 3632 3633 segs_off = offsetof(struct hwrm_dbg_coredump_list_output, 3634 total_segments); 3635 cmn_req->host_dest_addr = cpu_to_le64(dma_handle); 3636 cmn_req->host_buf_len = cpu_to_le32(info->dma_len); 3637 while (1) { 3638 *seq_ptr = cpu_to_le16(seq); 3639 rc = hwrm_req_send(bp, msg); 3640 if (rc) 3641 break; 3642 3643 len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); 3644 if (!seq && 3645 cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { 3646 info->segs = le16_to_cpu(*((__le16 *)(resp + 3647 segs_off))); 3648 if (!info->segs) { 3649 rc = -EIO; 3650 break; 3651 } 3652 3653 info->dest_buf_size = info->segs * 3654 sizeof(struct coredump_segment_record); 3655 info->dest_buf = kmalloc(info->dest_buf_size, 3656 GFP_KERNEL); 3657 if (!info->dest_buf) { 3658 rc = -ENOMEM; 3659 break; 3660 } 3661 } 3662 3663 if (info->dest_buf) { 3664 if ((info->seg_start + off + len) <= 3665 BNXT_COREDUMP_BUF_LEN(info->buf_len)) { 3666 memcpy(info->dest_buf + off, dma_buf, len); 3667 } else { 3668 rc = -ENOBUFS; 3669 break; 3670 } 3671 } 3672 3673 if (cmn_req->req_type == 3674 cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) 3675 info->dest_buf_size += len; 3676 3677 if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) 3678 break; 3679 3680 seq++; 3681 off += len; 3682 } 3683 hwrm_req_drop(bp, msg); 3684 return rc; 3685 } 3686 3687 static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, 3688 struct bnxt_coredump *coredump) 3689 { 3690 struct bnxt_hwrm_dbg_dma_info info = {NULL}; 3691 struct hwrm_dbg_coredump_list_input *req; 3692 int rc; 3693 3694 rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); 3695 if (rc) 3696 return rc; 3697 3698 info.dma_len = COREDUMP_LIST_BUF_LEN; 3699 info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); 3700 info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, 3701 data_len); 3702 3703 rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); 3704 if (!rc) { 3705 coredump->data = info.dest_buf; 3706 coredump->data_size = info.dest_buf_size; 3707 coredump->total_segs = info.segs; 3708 } 3709 return rc; 3710 } 3711 3712 static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, 3713 u16 segment_id) 3714 { 3715 struct hwrm_dbg_coredump_initiate_input *req; 3716 int rc; 3717 3718 rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE); 3719 if (rc) 3720 return rc; 3721 3722 hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); 3723 req->component_id = cpu_to_le16(component_id); 3724 req->segment_id = cpu_to_le16(segment_id); 3725 3726 return hwrm_req_send(bp, req); 3727 } 3728 3729 static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, 3730 u16 segment_id, u32 *seg_len, 3731 void *buf, u32 buf_len, u32 offset) 3732 { 3733 struct hwrm_dbg_coredump_retrieve_input *req; 3734 struct bnxt_hwrm_dbg_dma_info info = {NULL}; 3735 int rc; 3736 3737 rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); 3738 if (rc) 3739 return rc; 3740 3741 req->component_id = cpu_to_le16(component_id); 3742 req->segment_id = cpu_to_le16(segment_id); 3743 3744 info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; 3745 info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, 3746 seq_no); 3747 info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, 3748 data_len); 
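/* When the caller supplies a destination buffer, this segment lands at 'offset' within it; seg_start lets bnxt_hwrm_dbg_dma_data() bounds-check the copy against the overall coredump buffer. */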
3749 if (buf) { 3750 info.dest_buf = buf + offset; 3751 info.buf_len = buf_len; 3752 info.seg_start = offset; 3753 } 3754 3755 rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); 3756 if (!rc) 3757 *seg_len = info.dest_buf_size; 3758 3759 return rc; 3760 } 3761 3762 static void 3763 bnxt_fill_coredump_seg_hdr(struct bnxt *bp, 3764 struct bnxt_coredump_segment_hdr *seg_hdr, 3765 struct coredump_segment_record *seg_rec, u32 seg_len, 3766 int status, u32 duration, u32 instance) 3767 { 3768 memset(seg_hdr, 0, sizeof(*seg_hdr)); 3769 memcpy(seg_hdr->signature, "sEgM", 4); 3770 if (seg_rec) { 3771 seg_hdr->component_id = (__force __le32)seg_rec->component_id; 3772 seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; 3773 seg_hdr->low_version = seg_rec->version_low; 3774 seg_hdr->high_version = seg_rec->version_hi; 3775 } else { 3776 /* For hwrm_ver_get response Component id = 2 3777 * and Segment id = 0 3778 */ 3779 seg_hdr->component_id = cpu_to_le32(2); 3780 seg_hdr->segment_id = 0; 3781 } 3782 seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn); 3783 seg_hdr->length = cpu_to_le32(seg_len); 3784 seg_hdr->status = cpu_to_le32(status); 3785 seg_hdr->duration = cpu_to_le32(duration); 3786 seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); 3787 seg_hdr->instance = cpu_to_le32(instance); 3788 } 3789 3790 static void 3791 bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, 3792 time64_t start, s16 start_utc, u16 total_segs, 3793 int status) 3794 { 3795 time64_t end = ktime_get_real_seconds(); 3796 u32 os_ver_major = 0, os_ver_minor = 0; 3797 struct tm tm; 3798 3799 time64_to_tm(start, 0, &tm); 3800 memset(record, 0, sizeof(*record)); 3801 memcpy(record->signature, "cOrE", 4); 3802 record->flags = 0; 3803 record->low_version = 0; 3804 record->high_version = 1; 3805 record->asic_state = 0; 3806 strlcpy(record->system_name, utsname()->nodename, 3807 sizeof(record->system_name)); 3808 record->year = cpu_to_le16(tm.tm_year + 1900); 3809 record->month = cpu_to_le16(tm.tm_mon + 1); 3810 record->day = cpu_to_le16(tm.tm_mday); 3811 record->hour = cpu_to_le16(tm.tm_hour); 3812 record->minute = cpu_to_le16(tm.tm_min); 3813 record->second = cpu_to_le16(tm.tm_sec); 3814 record->utc_bias = cpu_to_le16(start_utc); 3815 strcpy(record->commandline, "ethtool -w"); 3816 record->total_segments = cpu_to_le32(total_segs); 3817 3818 sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor); 3819 record->os_ver_major = cpu_to_le32(os_ver_major); 3820 record->os_ver_minor = cpu_to_le32(os_ver_minor); 3821 3822 strlcpy(record->os_name, utsname()->sysname, 32); 3823 time64_to_tm(end, 0, &tm); 3824 record->end_year = cpu_to_le16(tm.tm_year + 1900); 3825 record->end_month = cpu_to_le16(tm.tm_mon + 1); 3826 record->end_day = cpu_to_le16(tm.tm_mday); 3827 record->end_hour = cpu_to_le16(tm.tm_hour); 3828 record->end_minute = cpu_to_le16(tm.tm_min); 3829 record->end_second = cpu_to_le16(tm.tm_sec); 3830 record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); 3831 record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | 3832 bp->ver_resp.chip_rev << 8 | 3833 bp->ver_resp.chip_metal); 3834 record->asic_id2 = 0; 3835 record->coredump_status = cpu_to_le32(status); 3836 record->ioctl_low_version = 0; 3837 record->ioctl_high_version = 0; 3838 } 3839 3840 static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) 3841 { 3842 u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); 3843 u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; 3844 struct coredump_segment_record 
*seg_record = NULL; 3845 struct bnxt_coredump_segment_hdr seg_hdr; 3846 struct bnxt_coredump coredump = {NULL}; 3847 time64_t start_time; 3848 u16 start_utc; 3849 int rc = 0, i; 3850 3851 if (buf) 3852 buf_len = *dump_len; 3853 3854 start_time = ktime_get_real_seconds(); 3855 start_utc = sys_tz.tz_minuteswest * 60; 3856 seg_hdr_len = sizeof(seg_hdr); 3857 3858 /* First segment should be hwrm_ver_get response */ 3859 *dump_len = seg_hdr_len + ver_get_resp_len; 3860 if (buf) { 3861 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, 3862 0, 0, 0); 3863 memcpy(buf + offset, &seg_hdr, seg_hdr_len); 3864 offset += seg_hdr_len; 3865 memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); 3866 offset += ver_get_resp_len; 3867 } 3868 3869 rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); 3870 if (rc) { 3871 netdev_err(bp->dev, "Failed to get coredump segment list\n"); 3872 goto err; 3873 } 3874 3875 *dump_len += seg_hdr_len * coredump.total_segs; 3876 3877 seg_record = (struct coredump_segment_record *)coredump.data; 3878 seg_record_len = sizeof(*seg_record); 3879 3880 for (i = 0; i < coredump.total_segs; i++) { 3881 u16 comp_id = le16_to_cpu(seg_record->component_id); 3882 u16 seg_id = le16_to_cpu(seg_record->segment_id); 3883 u32 duration = 0, seg_len = 0; 3884 unsigned long start, end; 3885 3886 if (buf && ((offset + seg_hdr_len) > 3887 BNXT_COREDUMP_BUF_LEN(buf_len))) { 3888 rc = -ENOBUFS; 3889 goto err; 3890 } 3891 3892 start = jiffies; 3893 3894 rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); 3895 if (rc) { 3896 netdev_err(bp->dev, 3897 "Failed to initiate coredump for seg = %d\n", 3898 seg_id); 3899 goto next_seg; 3900 } 3901 3902 /* Write segment data into the buffer */ 3903 rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, 3904 &seg_len, buf, buf_len, 3905 offset + seg_hdr_len); 3906 if (rc == -ENOBUFS) 3907 goto err; 3908 else if (rc) 3909 netdev_err(bp->dev, 3910 "Failed to retrieve coredump for seg = %d\n", 3911 seg_id); 3912 3913 next_seg: 3914 end = jiffies; 3915 duration = jiffies_to_msecs(end - start); 3916 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len, 3917 rc, duration, 0); 3918 3919 if (buf) { 3920 /* Write segment header into the buffer */ 3921 memcpy(buf + offset, &seg_hdr, seg_hdr_len); 3922 offset += seg_hdr_len + seg_len; 3923 } 3924 3925 *dump_len += seg_len; 3926 seg_record = 3927 (struct coredump_segment_record *)((u8 *)seg_record + 3928 seg_record_len); 3929 } 3930 3931 err: 3932 if (buf) 3933 bnxt_fill_coredump_record(bp, buf + offset, start_time, 3934 start_utc, coredump.total_segs + 1, 3935 rc); 3936 kfree(coredump.data); 3937 *dump_len += sizeof(struct bnxt_coredump_record); 3938 if (rc == -ENOBUFS) 3939 netdev_err(bp->dev, "Firmware coredump is larger than the provided buffer\n"); 3940 return rc; 3941 } 3942 3943 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) 3944 { 3945 struct bnxt *bp = netdev_priv(dev); 3946 3947 if (dump->flag > BNXT_DUMP_CRASH) { 3948 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n"); 3949 return -EINVAL; 3950 } 3951 3952 if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) { 3953 netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); 3954 return -EOPNOTSUPP; 3955 } 3956 3957 bp->dump_flag = dump->flag; 3958 return 0; 3959 } 3960 3961 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) 3962 { 3963 struct bnxt *bp = netdev_priv(dev); 3964 3965 if
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
			bp->ver_resp.hwrm_fw_min_8b << 16 |
			bp->ver_resp.hwrm_fw_bld_8b << 8 |
			bp->ver_resp.hwrm_fw_rsvd_8b;

	dump->flag = bp->dump_flag;
	if (bp->dump_flag == BNXT_DUMP_CRASH)
		dump->len = BNXT_CRASH_DUMP_LEN;
	else
		bnxt_get_coredump(bp, NULL, &dump->len);
	return 0;
}

static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	dump->flag = bp->dump_flag;
	if (dump->flag == BNXT_DUMP_CRASH) {
#ifdef CONFIG_TEE_BNXT_FW
		return tee_bnxt_copy_coredump(buf, 0, dump->len);
#endif
	} else {
		return bnxt_get_coredump(bp, buf, &dump->len);
	}

	return 0;
}

static int bnxt_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp;

	ptp = bp->ptp_cfg;
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	info->phc_index = -1;
	if (!ptp)
		return 0;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
				 SOF_TIMESTAMPING_RX_HARDWARE |
				 SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp->ptp_clock)
		info->phc_index = ptp_clock_index(ptp->ptp_clock);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
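/* Query the firmware's self-test list (HWRM_SELFTEST_QLIST) once at
 * init and cache the test count, offline mask, timeout and display
 * strings in bp->test_info.  Driver-defined tests (BNXT_DRV_TESTS)
 * follow the firmware-reported ones and get fixed strings; firmware
 * test names are suffixed with " test" and an (offline)/(online) tag.
 */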
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp;
	struct hwrm_selftest_qlist_input *req;
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
		return;

	test_info = bp->test_info;
	if (!test_info) {
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
		if (!test_info)
			return;
		bp->test_info = test_info;
	}

	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto ethtool_init_exit;

	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test0_name + i * 32;

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			strlcpy(str, fw_str, ETH_GSTRING_LEN);
			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
			if (test_info->offline_mask & (1 << i))
				strncat(str, " (offline)",
					ETH_GSTRING_LEN - strlen(str));
			else
				strncat(str, " (online)",
					ETH_GSTRING_LEN - strlen(str));
		}
	}

ethtool_init_exit:
	hwrm_req_drop(bp, req);
}

static void bnxt_get_eth_phy_stats(struct net_device *dev,
				   struct ethtool_eth_phy_stats *phy_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	phy_stats->SymbolErrorDuringCarrier =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
}

static void bnxt_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	mac_stats->FramesReceivedOK =
		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
	mac_stats->FramesTransmittedOK =
		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
	mac_stats->FrameCheckSequenceErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
	mac_stats->AlignmentErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
	mac_stats->OutOfRangeLengthField =
		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
}

static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
				    struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	ctrl_stats->MACControlFramesReceived =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
}
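/* Frame-size buckets for the RMON histogram.  Each entry lines up with
 * the hist[]/hist_tx[] slot filled from the corresponding
 * rx_*_frames/tx_*_frames port counter in bnxt_get_rmon_stats() below;
 * the empty entry terminates the table.
 */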
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 9216 },
	{ 9217, 16383 },
	{}
};

static void bnxt_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	rmon_stats->jabbers =
		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
	rmon_stats->oversize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
	rmon_stats->undersize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);

	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
	rmon_stats->hist[4] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
	rmon_stats->hist[5] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
	rmon_stats->hist[6] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
	rmon_stats->hist[7] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
	rmon_stats->hist[8] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
	rmon_stats->hist[9] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);

	rmon_stats->hist_tx[0] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
	rmon_stats->hist_tx[1] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
	rmon_stats->hist_tx[2] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
	rmon_stats->hist_tx[3] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
	rmon_stats->hist_tx[4] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
	rmon_stats->hist_tx[5] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
	rmon_stats->hist_tx[6] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
	rmon_stats->hist_tx[7] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
	rmon_stats->hist_tx[8] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
	rmon_stats->hist_tx[9] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);

	*ranges = bnxt_rmon_ranges;
}

void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}
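/* ethtool entry points for the driver.  supported_coalesce_params
 * advertises which struct ethtool_coalesce fields bnxt_set_coalesce()
 * honors; the ethtool core rejects requests that set any other field
 * before the driver callback runs.
 */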
const struct ethtool_ops bnxt_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_link_ksettings = bnxt_get_link_ksettings,
	.set_link_ksettings = bnxt_set_link_ksettings,
	.get_fec_stats = bnxt_get_fec_stats,
	.get_fecparam = bnxt_get_fecparam,
	.set_fecparam = bnxt_set_fecparam,
	.get_pause_stats = bnxt_get_pause_stats,
	.get_pauseparam = bnxt_get_pauseparam,
	.set_pauseparam = bnxt_set_pauseparam,
	.get_drvinfo = bnxt_get_drvinfo,
	.get_regs_len = bnxt_get_regs_len,
	.get_regs = bnxt_get_regs,
	.get_wol = bnxt_get_wol,
	.set_wol = bnxt_set_wol,
	.get_coalesce = bnxt_get_coalesce,
	.set_coalesce = bnxt_set_coalesce,
	.get_msglevel = bnxt_get_msglevel,
	.set_msglevel = bnxt_set_msglevel,
	.get_sset_count = bnxt_get_sset_count,
	.get_strings = bnxt_get_strings,
	.get_ethtool_stats = bnxt_get_ethtool_stats,
	.set_ringparam = bnxt_set_ringparam,
	.get_ringparam = bnxt_get_ringparam,
	.get_channels = bnxt_get_channels,
	.set_channels = bnxt_set_channels,
	.get_rxnfc = bnxt_get_rxnfc,
	.set_rxnfc = bnxt_set_rxnfc,
	.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size = bnxt_get_rxfh_key_size,
	.get_rxfh = bnxt_get_rxfh,
	.set_rxfh = bnxt_set_rxfh,
	.flash_device = bnxt_flash_device,
	.get_eeprom_len = bnxt_get_eeprom_len,
	.get_eeprom = bnxt_get_eeprom,
	.set_eeprom = bnxt_set_eeprom,
	.get_link = bnxt_get_link,
	.get_eee = bnxt_get_eee,
	.set_eee = bnxt_set_eee,
	.get_module_info = bnxt_get_module_info,
	.get_module_eeprom = bnxt_get_module_eeprom,
	.nway_reset = bnxt_nway_reset,
	.set_phys_id = bnxt_set_phys_id,
	.self_test = bnxt_self_test,
	.get_ts_info = bnxt_get_ts_info,
	.reset = bnxt_reset,
	.set_dump = bnxt_set_dump,
	.get_dump_flag = bnxt_get_dump_flag,
	.get_dump_data = bnxt_get_dump_data,
	.get_eth_phy_stats = bnxt_get_eth_phy_stats,
	.get_eth_mac_stats = bnxt_get_eth_mac_stats,
	.get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
	.get_rmon_stats = bnxt_get_rmon_stats,
};