1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2017 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/ctype.h> 12 #include <linux/stringify.h> 13 #include <linux/ethtool.h> 14 #include <linux/linkmode.h> 15 #include <linux/interrupt.h> 16 #include <linux/pci.h> 17 #include <linux/etherdevice.h> 18 #include <linux/crc32.h> 19 #include <linux/firmware.h> 20 #include <linux/utsname.h> 21 #include <linux/time.h> 22 #include <linux/ptp_clock_kernel.h> 23 #include <linux/net_tstamp.h> 24 #include <linux/timecounter.h> 25 #include "bnxt_hsi.h" 26 #include "bnxt.h" 27 #include "bnxt_xdp.h" 28 #include "bnxt_ptp.h" 29 #include "bnxt_ethtool.h" 30 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ 31 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ 32 #include "bnxt_coredump.h" 33 #define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) 34 #define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) 35 #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) 36 37 static u32 bnxt_get_msglevel(struct net_device *dev) 38 { 39 struct bnxt *bp = netdev_priv(dev); 40 41 return bp->msg_enable; 42 } 43 44 static void bnxt_set_msglevel(struct net_device *dev, u32 value) 45 { 46 struct bnxt *bp = netdev_priv(dev); 47 48 bp->msg_enable = value; 49 } 50 51 static int bnxt_get_coalesce(struct net_device *dev, 52 struct ethtool_coalesce *coal, 53 struct kernel_ethtool_coalesce *kernel_coal, 54 struct netlink_ext_ack *extack) 55 { 56 struct bnxt *bp = netdev_priv(dev); 57 struct bnxt_coal *hw_coal; 58 u16 mult; 59 60 memset(coal, 0, sizeof(*coal)); 61 62 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; 63 64 hw_coal = &bp->rx_coal; 65 mult = hw_coal->bufs_per_record; 66 coal->rx_coalesce_usecs = hw_coal->coal_ticks; 67 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; 68 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 69 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 70 71 hw_coal = &bp->tx_coal; 72 mult = hw_coal->bufs_per_record; 73 coal->tx_coalesce_usecs = hw_coal->coal_ticks; 74 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; 75 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 76 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 77 78 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; 79 80 return 0; 81 } 82 83 static int bnxt_set_coalesce(struct net_device *dev, 84 struct ethtool_coalesce *coal, 85 struct kernel_ethtool_coalesce *kernel_coal, 86 struct netlink_ext_ack *extack) 87 { 88 struct bnxt *bp = netdev_priv(dev); 89 bool update_stats = false; 90 struct bnxt_coal *hw_coal; 91 int rc = 0; 92 u16 mult; 93 94 if (coal->use_adaptive_rx_coalesce) { 95 bp->flags |= BNXT_FLAG_DIM; 96 } else { 97 if (bp->flags & BNXT_FLAG_DIM) { 98 bp->flags &= ~(BNXT_FLAG_DIM); 99 goto reset_coalesce; 100 } 101 } 102 103 hw_coal = &bp->rx_coal; 104 mult = hw_coal->bufs_per_record; 105 hw_coal->coal_ticks = coal->rx_coalesce_usecs; 106 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; 107 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; 108 hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; 109 110 hw_coal = &bp->tx_coal; 111 mult = hw_coal->bufs_per_record; 112 hw_coal->coal_ticks = coal->tx_coalesce_usecs; 113 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; 114 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; 115 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; 116 117 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { 118 u32 stats_ticks = coal->stats_block_coalesce_usecs; 119 120 /* Allow 0, which means disable. */ 121 if (stats_ticks) 122 stats_ticks = clamp_t(u32, stats_ticks, 123 BNXT_MIN_STATS_COAL_TICKS, 124 BNXT_MAX_STATS_COAL_TICKS); 125 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); 126 bp->stats_coal_ticks = stats_ticks; 127 if (bp->stats_coal_ticks) 128 bp->current_interval = 129 bp->stats_coal_ticks * HZ / 1000000; 130 else 131 bp->current_interval = BNXT_TIMER_INTERVAL; 132 update_stats = true; 133 } 134 135 reset_coalesce: 136 if (netif_running(dev)) { 137 if (update_stats) { 138 rc = bnxt_close_nic(bp, true, false); 139 if (!rc) 140 rc = bnxt_open_nic(bp, true, false); 141 } else { 142 rc = bnxt_hwrm_set_coal(bp); 143 } 144 } 145 146 return rc; 147 } 148 149 static const char * const bnxt_ring_rx_stats_str[] = { 150 "rx_ucast_packets", 151 "rx_mcast_packets", 152 "rx_bcast_packets", 153 "rx_discards", 154 "rx_errors", 155 "rx_ucast_bytes", 156 "rx_mcast_bytes", 157 "rx_bcast_bytes", 158 }; 159 160 static const char * const bnxt_ring_tx_stats_str[] = { 161 "tx_ucast_packets", 162 "tx_mcast_packets", 163 "tx_bcast_packets", 164 "tx_errors", 165 "tx_discards", 166 "tx_ucast_bytes", 167 "tx_mcast_bytes", 168 "tx_bcast_bytes", 169 }; 170 171 static const char * const bnxt_ring_tpa_stats_str[] = { 172 "tpa_packets", 173 "tpa_bytes", 174 "tpa_events", 175 "tpa_aborts", 176 }; 177 178 static const char * const bnxt_ring_tpa2_stats_str[] = { 179 "rx_tpa_eligible_pkt", 180 "rx_tpa_eligible_bytes", 181 "rx_tpa_pkt", 182 "rx_tpa_bytes", 183 "rx_tpa_errors", 184 "rx_tpa_events", 185 }; 186 187 static const char * const bnxt_rx_sw_stats_str[] = { 188 "rx_l4_csum_errors", 189 "rx_resets", 190 "rx_buf_errors", 191 }; 192 193 static const char * const bnxt_cmn_sw_stats_str[] = { 194 "missed_irqs", 195 }; 196 197 #define BNXT_RX_STATS_ENTRY(counter) \ 198 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } 199 200 #define BNXT_TX_STATS_ENTRY(counter) \ 201 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } 202 203 #define BNXT_RX_STATS_EXT_ENTRY(counter) \ 204 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } 205 206 #define BNXT_TX_STATS_EXT_ENTRY(counter) \ 207 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) } 208 209 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \ 210 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \ 211 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions) 212 213 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \ 214 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \ 215 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions) 216 217 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \ 218 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \ 219 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \ 220 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \ 221 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \ 222 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \ 223 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \ 224 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \ 225 BNXT_RX_STATS_EXT_PFC_ENTRY(7) 226 227 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \ 228 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \ 229 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \ 230 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \ 231 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \ 232 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \ 233 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \ 234 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \ 235 BNXT_TX_STATS_EXT_PFC_ENTRY(7) 236 237 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \ 238 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \ 239 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n) 240 241 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \ 242 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \ 243 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n) 244 245 #define BNXT_RX_STATS_EXT_COS_ENTRIES \ 246 BNXT_RX_STATS_EXT_COS_ENTRY(0), \ 247 BNXT_RX_STATS_EXT_COS_ENTRY(1), \ 248 BNXT_RX_STATS_EXT_COS_ENTRY(2), \ 249 BNXT_RX_STATS_EXT_COS_ENTRY(3), \ 250 BNXT_RX_STATS_EXT_COS_ENTRY(4), \ 251 BNXT_RX_STATS_EXT_COS_ENTRY(5), \ 252 BNXT_RX_STATS_EXT_COS_ENTRY(6), \ 253 BNXT_RX_STATS_EXT_COS_ENTRY(7) \ 254 255 #define BNXT_TX_STATS_EXT_COS_ENTRIES \ 256 BNXT_TX_STATS_EXT_COS_ENTRY(0), \ 257 BNXT_TX_STATS_EXT_COS_ENTRY(1), \ 258 BNXT_TX_STATS_EXT_COS_ENTRY(2), \ 259 BNXT_TX_STATS_EXT_COS_ENTRY(3), \ 260 BNXT_TX_STATS_EXT_COS_ENTRY(4), \ 261 BNXT_TX_STATS_EXT_COS_ENTRY(5), \ 262 BNXT_TX_STATS_EXT_COS_ENTRY(6), \ 263 BNXT_TX_STATS_EXT_COS_ENTRY(7) \ 264 265 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ 266 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ 267 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) 268 269 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ 270 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ 271 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ 272 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ 273 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ 274 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ 275 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ 276 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ 277 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) 278 279 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ 280 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ 281 __stringify(counter##_pri##n) } 282 283 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \ 284 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \ 285 __stringify(counter##_pri##n) } 286 287 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \ 288 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \ 289 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \ 290 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \ 291 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \ 292 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \ 293 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \ 294 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \ 295 BNXT_RX_STATS_PRI_ENTRY(counter, 7) 296 297 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \ 298 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \ 299 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \ 300 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \ 301 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \ 302 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \ 303 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \ 304 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ 305 BNXT_TX_STATS_PRI_ENTRY(counter, 7) 306 307 enum { 308 RX_TOTAL_DISCARDS, 309 TX_TOTAL_DISCARDS, 310 }; 311 312 static struct { 313 u64 counter; 314 char string[ETH_GSTRING_LEN]; 315 } bnxt_sw_func_stats[] = { 316 {0, "rx_total_discard_pkts"}, 317 {0, "tx_total_discard_pkts"}, 318 }; 319 320 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) 321 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str) 322 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str) 323 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str) 324 325 static const struct { 326 long offset; 327 char string[ETH_GSTRING_LEN]; 328 } bnxt_port_stats_arr[] = { 329 BNXT_RX_STATS_ENTRY(rx_64b_frames), 330 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames), 331 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), 332 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), 333 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), 334 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames), 335 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), 336 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), 337 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), 338 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames), 339 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames), 340 BNXT_RX_STATS_ENTRY(rx_total_frames), 341 BNXT_RX_STATS_ENTRY(rx_ucast_frames), 342 BNXT_RX_STATS_ENTRY(rx_mcast_frames), 343 BNXT_RX_STATS_ENTRY(rx_bcast_frames), 344 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames), 345 BNXT_RX_STATS_ENTRY(rx_ctrl_frames), 346 BNXT_RX_STATS_ENTRY(rx_pause_frames), 347 BNXT_RX_STATS_ENTRY(rx_pfc_frames), 348 BNXT_RX_STATS_ENTRY(rx_align_err_frames), 349 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames), 350 BNXT_RX_STATS_ENTRY(rx_jbr_frames), 351 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames), 352 BNXT_RX_STATS_ENTRY(rx_tagged_frames), 353 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames), 354 BNXT_RX_STATS_ENTRY(rx_good_frames), 355 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0), 356 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1), 357 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2), 358 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3), 359 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4), 360 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5), 361 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6), 362 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7), 363 BNXT_RX_STATS_ENTRY(rx_undrsz_frames), 364 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events), 365 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration), 366 BNXT_RX_STATS_ENTRY(rx_bytes), 367 BNXT_RX_STATS_ENTRY(rx_runt_bytes), 368 BNXT_RX_STATS_ENTRY(rx_runt_frames), 369 BNXT_RX_STATS_ENTRY(rx_stat_discard), 370 BNXT_RX_STATS_ENTRY(rx_stat_err), 371 372 BNXT_TX_STATS_ENTRY(tx_64b_frames), 373 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), 374 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), 375 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), 376 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), 377 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames), 378 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), 379 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames), 380 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), 381 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), 382 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), 383 BNXT_TX_STATS_ENTRY(tx_good_frames), 384 BNXT_TX_STATS_ENTRY(tx_total_frames), 385 BNXT_TX_STATS_ENTRY(tx_ucast_frames), 386 BNXT_TX_STATS_ENTRY(tx_mcast_frames), 387 BNXT_TX_STATS_ENTRY(tx_bcast_frames), 388 BNXT_TX_STATS_ENTRY(tx_pause_frames), 389 BNXT_TX_STATS_ENTRY(tx_pfc_frames), 390 BNXT_TX_STATS_ENTRY(tx_jabber_frames), 391 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames), 392 BNXT_TX_STATS_ENTRY(tx_err), 393 BNXT_TX_STATS_ENTRY(tx_fifo_underruns), 394 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0), 395 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1), 396 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2), 397 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3), 398 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4), 399 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5), 400 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6), 401 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7), 402 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events), 403 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), 404 BNXT_TX_STATS_ENTRY(tx_total_collisions), 405 BNXT_TX_STATS_ENTRY(tx_bytes), 406 BNXT_TX_STATS_ENTRY(tx_xthol_frames), 407 BNXT_TX_STATS_ENTRY(tx_stat_discard), 408 BNXT_TX_STATS_ENTRY(tx_stat_error), 409 }; 410 411 static const struct { 412 long offset; 413 char string[ETH_GSTRING_LEN]; 414 } bnxt_port_stats_ext_arr[] = { 415 BNXT_RX_STATS_EXT_ENTRY(link_down_events), 416 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events), 417 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), 418 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), 419 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), 420 BNXT_RX_STATS_EXT_COS_ENTRIES, 421 BNXT_RX_STATS_EXT_PFC_ENTRIES, 422 BNXT_RX_STATS_EXT_ENTRY(rx_bits), 423 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), 424 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), 425 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), 426 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, 427 }; 428 429 static const struct { 430 long offset; 431 char string[ETH_GSTRING_LEN]; 432 } bnxt_tx_port_stats_ext_arr[] = { 433 BNXT_TX_STATS_EXT_COS_ENTRIES, 434 BNXT_TX_STATS_EXT_PFC_ENTRIES, 435 }; 436 437 static const struct { 438 long base_off; 439 char string[ETH_GSTRING_LEN]; 440 } bnxt_rx_bytes_pri_arr[] = { 441 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes), 442 }; 443 444 static const struct { 445 long base_off; 446 char string[ETH_GSTRING_LEN]; 447 } bnxt_rx_pkts_pri_arr[] = { 448 BNXT_RX_STATS_PRI_ENTRIES(rx_packets), 449 }; 450 451 static const struct { 452 long base_off; 453 char string[ETH_GSTRING_LEN]; 454 } bnxt_tx_bytes_pri_arr[] = { 455 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes), 456 }; 457 458 static const struct { 459 long base_off; 460 char string[ETH_GSTRING_LEN]; 461 } bnxt_tx_pkts_pri_arr[] = { 462 BNXT_TX_STATS_PRI_ENTRIES(tx_packets), 463 }; 464 465 #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) 466 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) 467 #define BNXT_NUM_STATS_PRI \ 468 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ 469 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ 470 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ 471 ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) 472 473 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) 474 { 475 if (BNXT_SUPPORTS_TPA(bp)) { 476 if (bp->max_tpa_v2) { 477 if (BNXT_CHIP_P5_THOR(bp)) 478 return BNXT_NUM_TPA_RING_STATS_P5; 479 return BNXT_NUM_TPA_RING_STATS_P5_SR2; 480 } 481 return BNXT_NUM_TPA_RING_STATS; 482 } 483 return 0; 484 } 485 486 static int bnxt_get_num_ring_stats(struct bnxt *bp) 487 { 488 int rx, tx, cmn; 489 490 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + 491 bnxt_get_num_tpa_ring_stats(bp); 492 tx = NUM_RING_TX_HW_STATS; 493 cmn = NUM_RING_CMN_SW_STATS; 494 return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings + 495 cmn * bp->cp_nr_rings; 496 } 497 498 static int bnxt_get_num_stats(struct bnxt *bp) 499 { 500 int num_stats = bnxt_get_num_ring_stats(bp); 501 502 num_stats += BNXT_NUM_SW_FUNC_STATS; 503 504 if (bp->flags & BNXT_FLAG_PORT_STATS) 505 num_stats += BNXT_NUM_PORT_STATS; 506 507 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 508 num_stats += bp->fw_rx_stats_ext_size + 509 bp->fw_tx_stats_ext_size; 510 if (bp->pri2cos_valid) 511 num_stats += BNXT_NUM_STATS_PRI; 512 } 513 514 return num_stats; 515 } 516 517 static int bnxt_get_sset_count(struct net_device *dev, int sset) 518 { 519 struct bnxt *bp = netdev_priv(dev); 520 521 switch (sset) { 522 case ETH_SS_STATS: 523 return bnxt_get_num_stats(bp); 524 case ETH_SS_TEST: 525 if (!bp->num_tests) 526 return -EOPNOTSUPP; 527 return bp->num_tests; 528 default: 529 return -EOPNOTSUPP; 530 } 531 } 532 533 static bool is_rx_ring(struct bnxt *bp, int ring_num) 534 { 535 return ring_num < bp->rx_nr_rings; 536 } 537 538 static bool is_tx_ring(struct bnxt *bp, int ring_num) 539 { 540 int tx_base = 0; 541 542 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 543 tx_base = bp->rx_nr_rings; 544 545 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings)) 546 return true; 547 return false; 548 } 549 550 static void bnxt_get_ethtool_stats(struct net_device *dev, 551 struct ethtool_stats *stats, u64 *buf) 552 { 553 u32 i, j = 0; 554 struct bnxt *bp = netdev_priv(dev); 555 u32 tpa_stats; 556 557 if (!bp->bnapi) { 558 j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; 559 goto skip_ring_stats; 560 } 561 562 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) 563 bnxt_sw_func_stats[i].counter = 0; 564 565 tpa_stats = bnxt_get_num_tpa_ring_stats(bp); 566 for (i = 0; i < bp->cp_nr_rings; i++) { 567 struct bnxt_napi *bnapi = bp->bnapi[i]; 568 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 569 u64 *sw_stats = cpr->stats.sw_stats; 570 u64 *sw; 571 int k; 572 573 if (is_rx_ring(bp, i)) { 574 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) 575 buf[j] = sw_stats[k]; 576 } 577 if (is_tx_ring(bp, i)) { 578 k = NUM_RING_RX_HW_STATS; 579 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 580 j++, k++) 581 buf[j] = sw_stats[k]; 582 } 583 if (!tpa_stats || !is_rx_ring(bp, i)) 584 goto skip_tpa_ring_stats; 585 586 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 587 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + 588 tpa_stats; j++, k++) 589 buf[j] = sw_stats[k]; 590 591 skip_tpa_ring_stats: 592 sw = (u64 *)&cpr->sw_stats.rx; 593 if (is_rx_ring(bp, i)) { 594 for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) 595 buf[j] = sw[k]; 596 } 597 598 sw = (u64 *)&cpr->sw_stats.cmn; 599 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) 600 buf[j] = sw[k]; 601 602 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += 603 BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); 604 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += 605 BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); 606 } 607 608 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) 609 buf[j] = bnxt_sw_func_stats[i].counter; 610 611 skip_ring_stats: 612 if (bp->flags & BNXT_FLAG_PORT_STATS) { 613 u64 *port_stats = bp->port_stats.sw_stats; 614 615 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) 616 buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); 617 } 618 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 619 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; 620 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; 621 622 for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { 623 buf[j] = *(rx_port_stats_ext + 624 bnxt_port_stats_ext_arr[i].offset); 625 } 626 for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { 627 buf[j] = *(tx_port_stats_ext + 628 bnxt_tx_port_stats_ext_arr[i].offset); 629 } 630 if (bp->pri2cos_valid) { 631 for (i = 0; i < 8; i++, j++) { 632 long n = bnxt_rx_bytes_pri_arr[i].base_off + 633 bp->pri2cos_idx[i]; 634 635 buf[j] = *(rx_port_stats_ext + n); 636 } 637 for (i = 0; i < 8; i++, j++) { 638 long n = bnxt_rx_pkts_pri_arr[i].base_off + 639 bp->pri2cos_idx[i]; 640 641 buf[j] = *(rx_port_stats_ext + n); 642 } 643 for (i = 0; i < 8; i++, j++) { 644 long n = bnxt_tx_bytes_pri_arr[i].base_off + 645 bp->pri2cos_idx[i]; 646 647 buf[j] = *(tx_port_stats_ext + n); 648 } 649 for (i = 0; i < 8; i++, j++) { 650 long n = bnxt_tx_pkts_pri_arr[i].base_off + 651 bp->pri2cos_idx[i]; 652 653 buf[j] = *(tx_port_stats_ext + n); 654 } 655 } 656 } 657 } 658 659 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 660 { 661 struct bnxt *bp = netdev_priv(dev); 662 static const char * const *str; 663 u32 i, j, num_str; 664 665 switch (stringset) { 666 case ETH_SS_STATS: 667 for (i = 0; i < bp->cp_nr_rings; i++) { 668 if (is_rx_ring(bp, i)) { 669 num_str = NUM_RING_RX_HW_STATS; 670 for (j = 0; j < num_str; j++) { 671 sprintf(buf, "[%d]: %s", i, 672 bnxt_ring_rx_stats_str[j]); 673 buf += ETH_GSTRING_LEN; 674 } 675 } 676 if (is_tx_ring(bp, i)) { 677 num_str = NUM_RING_TX_HW_STATS; 678 for (j = 0; j < num_str; j++) { 679 sprintf(buf, "[%d]: %s", i, 680 bnxt_ring_tx_stats_str[j]); 681 buf += ETH_GSTRING_LEN; 682 } 683 } 684 num_str = bnxt_get_num_tpa_ring_stats(bp); 685 if (!num_str || !is_rx_ring(bp, i)) 686 goto skip_tpa_stats; 687 688 if (bp->max_tpa_v2) 689 str = bnxt_ring_tpa2_stats_str; 690 else 691 str = bnxt_ring_tpa_stats_str; 692 693 for (j = 0; j < num_str; j++) { 694 sprintf(buf, "[%d]: %s", i, str[j]); 695 buf += ETH_GSTRING_LEN; 696 } 697 skip_tpa_stats: 698 if (is_rx_ring(bp, i)) { 699 num_str = NUM_RING_RX_SW_STATS; 700 for (j = 0; j < num_str; j++) { 701 sprintf(buf, "[%d]: %s", i, 702 bnxt_rx_sw_stats_str[j]); 703 buf += ETH_GSTRING_LEN; 704 } 705 } 706 num_str = NUM_RING_CMN_SW_STATS; 707 for (j = 0; j < num_str; j++) { 708 sprintf(buf, "[%d]: %s", i, 709 bnxt_cmn_sw_stats_str[j]); 710 buf += ETH_GSTRING_LEN; 711 } 712 } 713 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { 714 strcpy(buf, bnxt_sw_func_stats[i].string); 715 buf += ETH_GSTRING_LEN; 716 } 717 718 if (bp->flags & BNXT_FLAG_PORT_STATS) { 719 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { 720 strcpy(buf, bnxt_port_stats_arr[i].string); 721 buf += ETH_GSTRING_LEN; 722 } 723 } 724 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 725 for (i = 0; i < bp->fw_rx_stats_ext_size; i++) { 726 strcpy(buf, bnxt_port_stats_ext_arr[i].string); 727 buf += ETH_GSTRING_LEN; 728 } 729 for (i = 0; i < bp->fw_tx_stats_ext_size; i++) { 730 strcpy(buf, 731 bnxt_tx_port_stats_ext_arr[i].string); 732 buf += ETH_GSTRING_LEN; 733 } 734 if (bp->pri2cos_valid) { 735 for (i = 0; i < 8; i++) { 736 strcpy(buf, 737 bnxt_rx_bytes_pri_arr[i].string); 738 buf += ETH_GSTRING_LEN; 739 } 740 for (i = 0; i < 8; i++) { 741 strcpy(buf, 742 bnxt_rx_pkts_pri_arr[i].string); 743 buf += ETH_GSTRING_LEN; 744 } 745 for (i = 0; i < 8; i++) { 746 strcpy(buf, 747 bnxt_tx_bytes_pri_arr[i].string); 748 buf += ETH_GSTRING_LEN; 749 } 750 for (i = 0; i < 8; i++) { 751 strcpy(buf, 752 bnxt_tx_pkts_pri_arr[i].string); 753 buf += ETH_GSTRING_LEN; 754 } 755 } 756 } 757 break; 758 case ETH_SS_TEST: 759 if (bp->num_tests) 760 memcpy(buf, bp->test_info->string, 761 bp->num_tests * ETH_GSTRING_LEN); 762 break; 763 default: 764 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", 765 stringset); 766 break; 767 } 768 } 769 770 static void bnxt_get_ringparam(struct net_device *dev, 771 struct ethtool_ringparam *ering) 772 { 773 struct bnxt *bp = netdev_priv(dev); 774 775 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 776 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 777 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; 778 } else { 779 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; 780 ering->rx_jumbo_max_pending = 0; 781 } 782 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; 783 784 ering->rx_pending = bp->rx_ring_size; 785 ering->rx_jumbo_pending = bp->rx_agg_ring_size; 786 ering->tx_pending = bp->tx_ring_size; 787 } 788 789 static int bnxt_set_ringparam(struct net_device *dev, 790 struct ethtool_ringparam *ering) 791 { 792 struct bnxt *bp = netdev_priv(dev); 793 794 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || 795 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || 796 (ering->tx_pending <= MAX_SKB_FRAGS)) 797 return -EINVAL; 798 799 if (netif_running(dev)) 800 bnxt_close_nic(bp, false, false); 801 802 bp->rx_ring_size = ering->rx_pending; 803 bp->tx_ring_size = ering->tx_pending; 804 bnxt_set_ring_params(bp); 805 806 if (netif_running(dev)) 807 return bnxt_open_nic(bp, false, false); 808 809 return 0; 810 } 811 812 static void bnxt_get_channels(struct net_device *dev, 813 struct ethtool_channels *channel) 814 { 815 struct bnxt *bp = netdev_priv(dev); 816 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 817 int max_rx_rings, max_tx_rings, tcs; 818 int max_tx_sch_inputs, tx_grps; 819 820 /* Get the most up-to-date max_tx_sch_inputs. */ 821 if (netif_running(dev) && BNXT_NEW_RM(bp)) 822 bnxt_hwrm_func_resc_qcaps(bp, false); 823 max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; 824 825 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); 826 if (max_tx_sch_inputs) 827 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 828 829 tcs = netdev_get_num_tc(dev); 830 tx_grps = max(tcs, 1); 831 if (bp->tx_nr_rings_xdp) 832 tx_grps++; 833 max_tx_rings /= tx_grps; 834 channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); 835 836 if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { 837 max_rx_rings = 0; 838 max_tx_rings = 0; 839 } 840 if (max_tx_sch_inputs) 841 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 842 843 if (tcs > 1) 844 max_tx_rings /= tcs; 845 846 channel->max_rx = max_rx_rings; 847 channel->max_tx = max_tx_rings; 848 channel->max_other = 0; 849 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { 850 channel->combined_count = bp->rx_nr_rings; 851 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 852 channel->combined_count--; 853 } else { 854 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { 855 channel->rx_count = bp->rx_nr_rings; 856 channel->tx_count = bp->tx_nr_rings_per_tc; 857 } 858 } 859 } 860 861 static int bnxt_set_channels(struct net_device *dev, 862 struct ethtool_channels *channel) 863 { 864 struct bnxt *bp = netdev_priv(dev); 865 int req_tx_rings, req_rx_rings, tcs; 866 bool sh = false; 867 int tx_xdp = 0; 868 int rc = 0; 869 870 if (channel->other_count) 871 return -EINVAL; 872 873 if (!channel->combined_count && 874 (!channel->rx_count || !channel->tx_count)) 875 return -EINVAL; 876 877 if (channel->combined_count && 878 (channel->rx_count || channel->tx_count)) 879 return -EINVAL; 880 881 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count || 882 channel->tx_count)) 883 return -EINVAL; 884 885 if (channel->combined_count) 886 sh = true; 887 888 tcs = netdev_get_num_tc(dev); 889 890 req_tx_rings = sh ? channel->combined_count : channel->tx_count; 891 req_rx_rings = sh ? channel->combined_count : channel->rx_count; 892 if (bp->tx_nr_rings_xdp) { 893 if (!sh) { 894 netdev_err(dev, "Only combined mode supported when XDP is enabled.\n"); 895 return -EINVAL; 896 } 897 tx_xdp = req_rx_rings; 898 } 899 rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); 900 if (rc) { 901 netdev_warn(dev, "Unable to allocate the requested rings\n"); 902 return rc; 903 } 904 905 if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != 906 bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && 907 (dev->priv_flags & IFF_RXFH_CONFIGURED)) { 908 netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n"); 909 return -EINVAL; 910 } 911 912 if (netif_running(dev)) { 913 if (BNXT_PF(bp)) { 914 /* TODO CHIMP_FW: Send message to all VF's 915 * before PF unload 916 */ 917 } 918 rc = bnxt_close_nic(bp, true, false); 919 if (rc) { 920 netdev_err(bp->dev, "Set channel failure rc :%x\n", 921 rc); 922 return rc; 923 } 924 } 925 926 if (sh) { 927 bp->flags |= BNXT_FLAG_SHARED_RINGS; 928 bp->rx_nr_rings = channel->combined_count; 929 bp->tx_nr_rings_per_tc = channel->combined_count; 930 } else { 931 bp->flags &= ~BNXT_FLAG_SHARED_RINGS; 932 bp->rx_nr_rings = channel->rx_count; 933 bp->tx_nr_rings_per_tc = channel->tx_count; 934 } 935 bp->tx_nr_rings_xdp = tx_xdp; 936 bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp; 937 if (tcs > 1) 938 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp; 939 940 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 941 bp->tx_nr_rings + bp->rx_nr_rings; 942 943 /* After changing number of rx channels, update NTUPLE feature. */ 944 netdev_update_features(dev); 945 if (netif_running(dev)) { 946 rc = bnxt_open_nic(bp, true, false); 947 if ((!rc) && BNXT_PF(bp)) { 948 /* TODO CHIMP_FW: Send message to all VF's 949 * to renable 950 */ 951 } 952 } else { 953 rc = bnxt_reserve_rings(bp, true); 954 } 955 956 return rc; 957 } 958 959 #ifdef CONFIG_RFS_ACCEL 960 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, 961 u32 *rule_locs) 962 { 963 int i, j = 0; 964 965 cmd->data = bp->ntp_fltr_count; 966 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 967 struct hlist_head *head; 968 struct bnxt_ntuple_filter *fltr; 969 970 head = &bp->ntp_fltr_hash_tbl[i]; 971 rcu_read_lock(); 972 hlist_for_each_entry_rcu(fltr, head, hash) { 973 if (j == cmd->rule_cnt) 974 break; 975 rule_locs[j++] = fltr->sw_id; 976 } 977 rcu_read_unlock(); 978 if (j == cmd->rule_cnt) 979 break; 980 } 981 cmd->rule_cnt = j; 982 return 0; 983 } 984 985 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) 986 { 987 struct ethtool_rx_flow_spec *fs = 988 (struct ethtool_rx_flow_spec *)&cmd->fs; 989 struct bnxt_ntuple_filter *fltr; 990 struct flow_keys *fkeys; 991 int i, rc = -EINVAL; 992 993 if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) 994 return rc; 995 996 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 997 struct hlist_head *head; 998 999 head = &bp->ntp_fltr_hash_tbl[i]; 1000 rcu_read_lock(); 1001 hlist_for_each_entry_rcu(fltr, head, hash) { 1002 if (fltr->sw_id == fs->location) 1003 goto fltr_found; 1004 } 1005 rcu_read_unlock(); 1006 } 1007 return rc; 1008 1009 fltr_found: 1010 fkeys = &fltr->fkeys; 1011 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 1012 if (fkeys->basic.ip_proto == IPPROTO_TCP) 1013 fs->flow_type = TCP_V4_FLOW; 1014 else if (fkeys->basic.ip_proto == IPPROTO_UDP) 1015 fs->flow_type = UDP_V4_FLOW; 1016 else 1017 goto fltr_err; 1018 1019 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; 1020 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); 1021 1022 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; 1023 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); 1024 1025 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; 1026 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); 1027 1028 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; 1029 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); 1030 } else { 1031 int i; 1032 1033 if (fkeys->basic.ip_proto == IPPROTO_TCP) 1034 fs->flow_type = TCP_V6_FLOW; 1035 else if (fkeys->basic.ip_proto == IPPROTO_UDP) 1036 fs->flow_type = UDP_V6_FLOW; 1037 else 1038 goto fltr_err; 1039 1040 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = 1041 fkeys->addrs.v6addrs.src; 1042 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = 1043 fkeys->addrs.v6addrs.dst; 1044 for (i = 0; i < 4; i++) { 1045 fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0); 1046 fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0); 1047 } 1048 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; 1049 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); 1050 1051 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; 1052 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); 1053 } 1054 1055 fs->ring_cookie = fltr->rxq; 1056 rc = 0; 1057 1058 fltr_err: 1059 rcu_read_unlock(); 1060 1061 return rc; 1062 } 1063 #endif 1064 1065 static u64 get_ethtool_ipv4_rss(struct bnxt *bp) 1066 { 1067 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 1068 return RXH_IP_SRC | RXH_IP_DST; 1069 return 0; 1070 } 1071 1072 static u64 get_ethtool_ipv6_rss(struct bnxt *bp) 1073 { 1074 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 1075 return RXH_IP_SRC | RXH_IP_DST; 1076 return 0; 1077 } 1078 1079 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1080 { 1081 cmd->data = 0; 1082 switch (cmd->flow_type) { 1083 case TCP_V4_FLOW: 1084 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) 1085 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1086 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1087 cmd->data |= get_ethtool_ipv4_rss(bp); 1088 break; 1089 case UDP_V4_FLOW: 1090 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) 1091 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1092 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1093 fallthrough; 1094 case SCTP_V4_FLOW: 1095 case AH_ESP_V4_FLOW: 1096 case AH_V4_FLOW: 1097 case ESP_V4_FLOW: 1098 case IPV4_FLOW: 1099 cmd->data |= get_ethtool_ipv4_rss(bp); 1100 break; 1101 1102 case TCP_V6_FLOW: 1103 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) 1104 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1105 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1106 cmd->data |= get_ethtool_ipv6_rss(bp); 1107 break; 1108 case UDP_V6_FLOW: 1109 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) 1110 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1111 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1112 fallthrough; 1113 case SCTP_V6_FLOW: 1114 case AH_ESP_V6_FLOW: 1115 case AH_V6_FLOW: 1116 case ESP_V6_FLOW: 1117 case IPV6_FLOW: 1118 cmd->data |= get_ethtool_ipv6_rss(bp); 1119 break; 1120 } 1121 return 0; 1122 } 1123 1124 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) 1125 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST) 1126 1127 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1128 { 1129 u32 rss_hash_cfg = bp->rss_hash_cfg; 1130 int tuple, rc = 0; 1131 1132 if (cmd->data == RXH_4TUPLE) 1133 tuple = 4; 1134 else if (cmd->data == RXH_2TUPLE) 1135 tuple = 2; 1136 else if (!cmd->data) 1137 tuple = 0; 1138 else 1139 return -EINVAL; 1140 1141 if (cmd->flow_type == TCP_V4_FLOW) { 1142 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1143 if (tuple == 4) 1144 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1145 } else if (cmd->flow_type == UDP_V4_FLOW) { 1146 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP)) 1147 return -EINVAL; 1148 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1149 if (tuple == 4) 1150 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1151 } else if (cmd->flow_type == TCP_V6_FLOW) { 1152 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1153 if (tuple == 4) 1154 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1155 } else if (cmd->flow_type == UDP_V6_FLOW) { 1156 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP)) 1157 return -EINVAL; 1158 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1159 if (tuple == 4) 1160 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1161 } else if (tuple == 4) { 1162 return -EINVAL; 1163 } 1164 1165 switch (cmd->flow_type) { 1166 case TCP_V4_FLOW: 1167 case UDP_V4_FLOW: 1168 case SCTP_V4_FLOW: 1169 case AH_ESP_V4_FLOW: 1170 case AH_V4_FLOW: 1171 case ESP_V4_FLOW: 1172 case IPV4_FLOW: 1173 if (tuple == 2) 1174 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1175 else if (!tuple) 1176 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1177 break; 1178 1179 case TCP_V6_FLOW: 1180 case UDP_V6_FLOW: 1181 case SCTP_V6_FLOW: 1182 case AH_ESP_V6_FLOW: 1183 case AH_V6_FLOW: 1184 case ESP_V6_FLOW: 1185 case IPV6_FLOW: 1186 if (tuple == 2) 1187 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; 1188 else if (!tuple) 1189 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; 1190 break; 1191 } 1192 1193 if (bp->rss_hash_cfg == rss_hash_cfg) 1194 return 0; 1195 1196 bp->rss_hash_cfg = rss_hash_cfg; 1197 if (netif_running(bp->dev)) { 1198 bnxt_close_nic(bp, false, false); 1199 rc = bnxt_open_nic(bp, false, false); 1200 } 1201 return rc; 1202 } 1203 1204 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 1205 u32 *rule_locs) 1206 { 1207 struct bnxt *bp = netdev_priv(dev); 1208 int rc = 0; 1209 1210 switch (cmd->cmd) { 1211 #ifdef CONFIG_RFS_ACCEL 1212 case ETHTOOL_GRXRINGS: 1213 cmd->data = bp->rx_nr_rings; 1214 break; 1215 1216 case ETHTOOL_GRXCLSRLCNT: 1217 cmd->rule_cnt = bp->ntp_fltr_count; 1218 cmd->data = BNXT_NTP_FLTR_MAX_FLTR; 1219 break; 1220 1221 case ETHTOOL_GRXCLSRLALL: 1222 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); 1223 break; 1224 1225 case ETHTOOL_GRXCLSRULE: 1226 rc = bnxt_grxclsrule(bp, cmd); 1227 break; 1228 #endif 1229 1230 case ETHTOOL_GRXFH: 1231 rc = bnxt_grxfh(bp, cmd); 1232 break; 1233 1234 default: 1235 rc = -EOPNOTSUPP; 1236 break; 1237 } 1238 1239 return rc; 1240 } 1241 1242 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 1243 { 1244 struct bnxt *bp = netdev_priv(dev); 1245 int rc; 1246 1247 switch (cmd->cmd) { 1248 case ETHTOOL_SRXFH: 1249 rc = bnxt_srxfh(bp, cmd); 1250 break; 1251 1252 default: 1253 rc = -EOPNOTSUPP; 1254 break; 1255 } 1256 return rc; 1257 } 1258 1259 u32 bnxt_get_rxfh_indir_size(struct net_device *dev) 1260 { 1261 struct bnxt *bp = netdev_priv(dev); 1262 1263 if (bp->flags & BNXT_FLAG_CHIP_P5) 1264 return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5); 1265 return HW_HASH_INDEX_SIZE; 1266 } 1267 1268 static u32 bnxt_get_rxfh_key_size(struct net_device *dev) 1269 { 1270 return HW_HASH_KEY_SIZE; 1271 } 1272 1273 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 1274 u8 *hfunc) 1275 { 1276 struct bnxt *bp = netdev_priv(dev); 1277 struct bnxt_vnic_info *vnic; 1278 u32 i, tbl_size; 1279 1280 if (hfunc) 1281 *hfunc = ETH_RSS_HASH_TOP; 1282 1283 if (!bp->vnic_info) 1284 return 0; 1285 1286 vnic = &bp->vnic_info[0]; 1287 if (indir && bp->rss_indir_tbl) { 1288 tbl_size = bnxt_get_rxfh_indir_size(dev); 1289 for (i = 0; i < tbl_size; i++) 1290 indir[i] = bp->rss_indir_tbl[i]; 1291 } 1292 1293 if (key && vnic->rss_hash_key) 1294 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); 1295 1296 return 0; 1297 } 1298 1299 static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir, 1300 const u8 *key, const u8 hfunc) 1301 { 1302 struct bnxt *bp = netdev_priv(dev); 1303 int rc = 0; 1304 1305 if (hfunc && hfunc != ETH_RSS_HASH_TOP) 1306 return -EOPNOTSUPP; 1307 1308 if (key) 1309 return -EOPNOTSUPP; 1310 1311 if (indir) { 1312 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); 1313 1314 for (i = 0; i < tbl_size; i++) 1315 bp->rss_indir_tbl[i] = indir[i]; 1316 pad = bp->rss_indir_tbl_entries - tbl_size; 1317 if (pad) 1318 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); 1319 } 1320 1321 if (netif_running(bp->dev)) { 1322 bnxt_close_nic(bp, false, false); 1323 rc = bnxt_open_nic(bp, false, false); 1324 } 1325 return rc; 1326 } 1327 1328 static void bnxt_get_drvinfo(struct net_device *dev, 1329 struct ethtool_drvinfo *info) 1330 { 1331 struct bnxt *bp = netdev_priv(dev); 1332 1333 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 1334 strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); 1335 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); 1336 info->n_stats = bnxt_get_num_stats(bp); 1337 info->testinfo_len = bp->num_tests; 1338 /* TODO CHIMP_FW: eeprom dump details */ 1339 info->eedump_len = 0; 1340 /* TODO CHIMP FW: reg dump details */ 1341 info->regdump_len = 0; 1342 } 1343 1344 static int bnxt_get_regs_len(struct net_device *dev) 1345 { 1346 struct bnxt *bp = netdev_priv(dev); 1347 int reg_len; 1348 1349 if (!BNXT_PF(bp)) 1350 return -EOPNOTSUPP; 1351 1352 reg_len = BNXT_PXP_REG_LEN; 1353 1354 if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) 1355 reg_len += sizeof(struct pcie_ctx_hw_stats); 1356 1357 return reg_len; 1358 } 1359 1360 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1361 void *_p) 1362 { 1363 struct pcie_ctx_hw_stats *hw_pcie_stats; 1364 struct hwrm_pcie_qstats_input req = {0}; 1365 struct bnxt *bp = netdev_priv(dev); 1366 dma_addr_t hw_pcie_stats_addr; 1367 int rc; 1368 1369 regs->version = 0; 1370 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); 1371 1372 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 1373 return; 1374 1375 hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev, 1376 sizeof(*hw_pcie_stats), 1377 &hw_pcie_stats_addr, GFP_KERNEL); 1378 if (!hw_pcie_stats) 1379 return; 1380 1381 regs->version = 1; 1382 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 1383 req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); 1384 req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); 1385 mutex_lock(&bp->hwrm_cmd_lock); 1386 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1387 if (!rc) { 1388 __le64 *src = (__le64 *)hw_pcie_stats; 1389 u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); 1390 int i; 1391 1392 for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) 1393 dst[i] = le64_to_cpu(src[i]); 1394 } 1395 mutex_unlock(&bp->hwrm_cmd_lock); 1396 dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats, 1397 hw_pcie_stats_addr); 1398 } 1399 1400 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1401 { 1402 struct bnxt *bp = netdev_priv(dev); 1403 1404 wol->supported = 0; 1405 wol->wolopts = 0; 1406 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1407 if (bp->flags & BNXT_FLAG_WOL_CAP) { 1408 wol->supported = WAKE_MAGIC; 1409 if (bp->wol) 1410 wol->wolopts = WAKE_MAGIC; 1411 } 1412 } 1413 1414 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1415 { 1416 struct bnxt *bp = netdev_priv(dev); 1417 1418 if (wol->wolopts & ~WAKE_MAGIC) 1419 return -EINVAL; 1420 1421 if (wol->wolopts & WAKE_MAGIC) { 1422 if (!(bp->flags & BNXT_FLAG_WOL_CAP)) 1423 return -EINVAL; 1424 if (!bp->wol) { 1425 if (bnxt_hwrm_alloc_wol_fltr(bp)) 1426 return -EBUSY; 1427 bp->wol = 1; 1428 } 1429 } else { 1430 if (bp->wol) { 1431 if (bnxt_hwrm_free_wol_fltr(bp)) 1432 return -EBUSY; 1433 bp->wol = 0; 1434 } 1435 } 1436 return 0; 1437 } 1438 1439 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) 1440 { 1441 u32 speed_mask = 0; 1442 1443 /* TODO: support 25GB, 40GB, 50GB with different cable type */ 1444 /* set the advertised speeds */ 1445 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) 1446 speed_mask |= ADVERTISED_100baseT_Full; 1447 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) 1448 speed_mask |= ADVERTISED_1000baseT_Full; 1449 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) 1450 speed_mask |= ADVERTISED_2500baseX_Full; 1451 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 1452 speed_mask |= ADVERTISED_10000baseT_Full; 1453 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 1454 speed_mask |= ADVERTISED_40000baseCR4_Full; 1455 1456 if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH) 1457 speed_mask |= ADVERTISED_Pause; 1458 else if (fw_pause & BNXT_LINK_PAUSE_TX) 1459 speed_mask |= ADVERTISED_Asym_Pause; 1460 else if (fw_pause & BNXT_LINK_PAUSE_RX) 1461 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 1462 1463 return speed_mask; 1464 } 1465 1466 #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ 1467 { \ 1468 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ 1469 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1470 100baseT_Full); \ 1471 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ 1472 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1473 1000baseT_Full); \ 1474 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ 1475 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1476 10000baseT_Full); \ 1477 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ 1478 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1479 25000baseCR_Full); \ 1480 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ 1481 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1482 40000baseCR4_Full);\ 1483 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ 1484 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1485 50000baseCR2_Full);\ 1486 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \ 1487 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1488 100000baseCR4_Full);\ 1489 if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ 1490 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1491 Pause); \ 1492 if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ 1493 ethtool_link_ksettings_add_link_mode( \ 1494 lk_ksettings, name, Asym_Pause);\ 1495 } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ 1496 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1497 Asym_Pause); \ 1498 } \ 1499 } 1500 1501 #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ 1502 { \ 1503 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1504 100baseT_Full) || \ 1505 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1506 100baseT_Half)) \ 1507 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ 1508 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1509 1000baseT_Full) || \ 1510 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1511 1000baseT_Half)) \ 1512 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ 1513 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1514 10000baseT_Full)) \ 1515 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ 1516 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1517 25000baseCR_Full)) \ 1518 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ 1519 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1520 40000baseCR4_Full)) \ 1521 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ 1522 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1523 50000baseCR2_Full)) \ 1524 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ 1525 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1526 100000baseCR4_Full)) \ 1527 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \ 1528 } 1529 1530 #define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ 1531 { \ 1532 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \ 1533 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1534 50000baseCR_Full); \ 1535 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \ 1536 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1537 100000baseCR2_Full);\ 1538 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \ 1539 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1540 200000baseCR4_Full);\ 1541 } 1542 1543 #define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ 1544 { \ 1545 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1546 50000baseCR_Full)) \ 1547 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \ 1548 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1549 100000baseCR2_Full)) \ 1550 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \ 1551 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1552 200000baseCR4_Full)) \ 1553 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \ 1554 } 1555 1556 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, 1557 struct ethtool_link_ksettings *lk_ksettings) 1558 { 1559 u16 fec_cfg = link_info->fec_cfg; 1560 1561 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) { 1562 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 1563 lk_ksettings->link_modes.advertising); 1564 return; 1565 } 1566 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 1567 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1568 lk_ksettings->link_modes.advertising); 1569 if (fec_cfg & BNXT_FEC_ENC_RS) 1570 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1571 lk_ksettings->link_modes.advertising); 1572 if (fec_cfg & BNXT_FEC_ENC_LLRS) 1573 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 1574 lk_ksettings->link_modes.advertising); 1575 } 1576 1577 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, 1578 struct ethtool_link_ksettings *lk_ksettings) 1579 { 1580 u16 fw_speeds = link_info->advertising; 1581 u8 fw_pause = 0; 1582 1583 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 1584 fw_pause = link_info->auto_pause_setting; 1585 1586 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); 1587 fw_speeds = link_info->advertising_pam4; 1588 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising); 1589 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); 1590 } 1591 1592 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, 1593 struct ethtool_link_ksettings *lk_ksettings) 1594 { 1595 u16 fw_speeds = link_info->lp_auto_link_speeds; 1596 u8 fw_pause = 0; 1597 1598 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 1599 fw_pause = link_info->lp_pause; 1600 1601 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, 1602 lp_advertising); 1603 fw_speeds = link_info->lp_auto_pam4_link_speeds; 1604 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising); 1605 } 1606 1607 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, 1608 struct ethtool_link_ksettings *lk_ksettings) 1609 { 1610 u16 fec_cfg = link_info->fec_cfg; 1611 1612 if (fec_cfg & BNXT_FEC_NONE) { 1613 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 1614 lk_ksettings->link_modes.supported); 1615 return; 1616 } 1617 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP) 1618 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1619 lk_ksettings->link_modes.supported); 1620 if (fec_cfg & BNXT_FEC_ENC_RS_CAP) 1621 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1622 lk_ksettings->link_modes.supported); 1623 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP) 1624 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 1625 lk_ksettings->link_modes.supported); 1626 } 1627 1628 static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, 1629 struct ethtool_link_ksettings *lk_ksettings) 1630 { 1631 u16 fw_speeds = link_info->support_speeds; 1632 1633 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); 1634 fw_speeds = link_info->support_pam4_speeds; 1635 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported); 1636 1637 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); 1638 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1639 Asym_Pause); 1640 1641 if (link_info->support_auto_speeds || 1642 link_info->support_pam4_auto_speeds) 1643 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1644 Autoneg); 1645 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); 1646 } 1647 1648 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) 1649 { 1650 switch (fw_link_speed) { 1651 case BNXT_LINK_SPEED_100MB: 1652 return SPEED_100; 1653 case BNXT_LINK_SPEED_1GB: 1654 return SPEED_1000; 1655 case BNXT_LINK_SPEED_2_5GB: 1656 return SPEED_2500; 1657 case BNXT_LINK_SPEED_10GB: 1658 return SPEED_10000; 1659 case BNXT_LINK_SPEED_20GB: 1660 return SPEED_20000; 1661 case BNXT_LINK_SPEED_25GB: 1662 return SPEED_25000; 1663 case BNXT_LINK_SPEED_40GB: 1664 return SPEED_40000; 1665 case BNXT_LINK_SPEED_50GB: 1666 return SPEED_50000; 1667 case BNXT_LINK_SPEED_100GB: 1668 return SPEED_100000; 1669 default: 1670 return SPEED_UNKNOWN; 1671 } 1672 } 1673 1674 static int bnxt_get_link_ksettings(struct net_device *dev, 1675 struct ethtool_link_ksettings *lk_ksettings) 1676 { 1677 struct bnxt *bp = netdev_priv(dev); 1678 struct bnxt_link_info *link_info = &bp->link_info; 1679 struct ethtool_link_settings *base = &lk_ksettings->base; 1680 u32 ethtool_speed; 1681 1682 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 1683 mutex_lock(&bp->link_lock); 1684 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); 1685 1686 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 1687 if (link_info->autoneg) { 1688 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); 1689 ethtool_link_ksettings_add_link_mode(lk_ksettings, 1690 advertising, Autoneg); 1691 base->autoneg = AUTONEG_ENABLE; 1692 base->duplex = DUPLEX_UNKNOWN; 1693 if (link_info->phy_link_status == BNXT_LINK_LINK) { 1694 bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); 1695 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) 1696 base->duplex = DUPLEX_FULL; 1697 else 1698 base->duplex = DUPLEX_HALF; 1699 } 1700 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); 1701 } else { 1702 base->autoneg = AUTONEG_DISABLE; 1703 ethtool_speed = 1704 bnxt_fw_to_ethtool_speed(link_info->req_link_speed); 1705 base->duplex = DUPLEX_HALF; 1706 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) 1707 base->duplex = DUPLEX_FULL; 1708 } 1709 base->speed = ethtool_speed; 1710 1711 base->port = PORT_NONE; 1712 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 1713 base->port = PORT_TP; 1714 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1715 TP); 1716 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, 1717 TP); 1718 } else { 1719 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1720 FIBRE); 1721 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, 1722 FIBRE); 1723 1724 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) 1725 base->port = PORT_DA; 1726 else if (link_info->media_type == 1727 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) 1728 base->port = PORT_FIBRE; 1729 } 1730 base->phy_address = link_info->phy_addr; 1731 mutex_unlock(&bp->link_lock); 1732 1733 return 0; 1734 } 1735 1736 static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) 1737 { 1738 struct bnxt *bp = netdev_priv(dev); 1739 struct bnxt_link_info *link_info = &bp->link_info; 1740 u16 support_pam4_spds = link_info->support_pam4_speeds; 1741 u16 support_spds = link_info->support_speeds; 1742 u8 sig_mode = BNXT_SIG_MODE_NRZ; 1743 u16 fw_speed = 0; 1744 1745 switch (ethtool_speed) { 1746 case SPEED_100: 1747 if (support_spds & BNXT_LINK_SPEED_MSK_100MB) 1748 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB; 1749 break; 1750 case SPEED_1000: 1751 if (support_spds & BNXT_LINK_SPEED_MSK_1GB) 1752 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 1753 break; 1754 case SPEED_2500: 1755 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) 1756 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; 1757 break; 1758 case SPEED_10000: 1759 if (support_spds & BNXT_LINK_SPEED_MSK_10GB) 1760 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 1761 break; 1762 case SPEED_20000: 1763 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) 1764 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; 1765 break; 1766 case SPEED_25000: 1767 if (support_spds & BNXT_LINK_SPEED_MSK_25GB) 1768 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 1769 break; 1770 case SPEED_40000: 1771 if (support_spds & BNXT_LINK_SPEED_MSK_40GB) 1772 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 1773 break; 1774 case SPEED_50000: 1775 if (support_spds & BNXT_LINK_SPEED_MSK_50GB) { 1776 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 1777 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { 1778 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; 1779 sig_mode = BNXT_SIG_MODE_PAM4; 1780 } 1781 break; 1782 case SPEED_100000: 1783 if (support_spds & BNXT_LINK_SPEED_MSK_100GB) { 1784 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; 1785 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { 1786 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; 1787 sig_mode = BNXT_SIG_MODE_PAM4; 1788 } 1789 break; 1790 case SPEED_200000: 1791 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { 1792 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; 1793 sig_mode = BNXT_SIG_MODE_PAM4; 1794 } 1795 break; 1796 } 1797 1798 if (!fw_speed) { 1799 netdev_err(dev, "unsupported speed!\n"); 1800 return -EINVAL; 1801 } 1802 1803 if (link_info->req_link_speed == fw_speed && 1804 link_info->req_signal_mode == sig_mode && 1805 link_info->autoneg == 0) 1806 return -EALREADY; 1807 1808 link_info->req_link_speed = fw_speed; 1809 link_info->req_signal_mode = sig_mode; 1810 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 1811 link_info->autoneg = 0; 1812 link_info->advertising = 0; 1813 link_info->advertising_pam4 = 0; 1814 1815 return 0; 1816 } 1817 1818 u16 bnxt_get_fw_auto_link_speeds(u32 advertising) 1819 { 1820 u16 fw_speed_mask = 0; 1821 1822 /* only support autoneg at speed 100, 1000, and 10000 */ 1823 if (advertising & (ADVERTISED_100baseT_Full | 1824 ADVERTISED_100baseT_Half)) { 1825 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; 1826 } 1827 if (advertising & (ADVERTISED_1000baseT_Full | 1828 ADVERTISED_1000baseT_Half)) { 1829 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; 1830 } 1831 if (advertising & ADVERTISED_10000baseT_Full) 1832 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 1833 1834 if (advertising & ADVERTISED_40000baseCR4_Full) 1835 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; 1836 1837 return fw_speed_mask; 1838 } 1839 1840 static int bnxt_set_link_ksettings(struct net_device *dev, 1841 const struct ethtool_link_ksettings *lk_ksettings) 1842 { 1843 struct bnxt *bp = netdev_priv(dev); 1844 struct bnxt_link_info *link_info = &bp->link_info; 1845 const struct ethtool_link_settings *base = &lk_ksettings->base; 1846 bool set_pause = false; 1847 u32 speed; 1848 int rc = 0; 1849 1850 if (!BNXT_PHY_CFG_ABLE(bp)) 1851 return -EOPNOTSUPP; 1852 1853 mutex_lock(&bp->link_lock); 1854 if (base->autoneg == AUTONEG_ENABLE) { 1855 link_info->advertising = 0; 1856 link_info->advertising_pam4 = 0; 1857 BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings, 1858 advertising); 1859 BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4, 1860 lk_ksettings, advertising); 1861 link_info->autoneg |= BNXT_AUTONEG_SPEED; 1862 if (!link_info->advertising && !link_info->advertising_pam4) { 1863 link_info->advertising = link_info->support_auto_speeds; 1864 link_info->advertising_pam4 = 1865 link_info->support_pam4_auto_speeds; 1866 } 1867 /* any change to autoneg will cause link change, therefore the 1868 * driver should put back the original pause setting in autoneg 1869 */ 1870 set_pause = true; 1871 } else { 1872 u8 phy_type = link_info->phy_type; 1873 1874 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || 1875 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || 1876 link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 1877 netdev_err(dev, "10GBase-T devices must autoneg\n"); 1878 rc = -EINVAL; 1879 goto set_setting_exit; 1880 } 1881 if (base->duplex == DUPLEX_HALF) { 1882 netdev_err(dev, "HALF DUPLEX is not supported!\n"); 1883 rc = -EINVAL; 1884 goto set_setting_exit; 1885 } 1886 speed = base->speed; 1887 rc = bnxt_force_link_speed(dev, speed); 1888 if (rc) { 1889 if (rc == -EALREADY) 1890 rc = 0; 1891 goto set_setting_exit; 1892 } 1893 } 1894 1895 if (netif_running(dev)) 1896 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 1897 1898 set_setting_exit: 1899 mutex_unlock(&bp->link_lock); 1900 return rc; 1901 } 1902 1903 static int bnxt_get_fecparam(struct net_device *dev, 1904 struct ethtool_fecparam *fec) 1905 { 1906 struct bnxt *bp = netdev_priv(dev); 1907 struct bnxt_link_info *link_info; 1908 u8 active_fec; 1909 u16 fec_cfg; 1910 1911 link_info = &bp->link_info; 1912 fec_cfg = link_info->fec_cfg; 1913 active_fec = link_info->active_fec_sig_mode & 1914 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 1915 if (fec_cfg & BNXT_FEC_NONE) { 1916 fec->fec = ETHTOOL_FEC_NONE; 1917 fec->active_fec = ETHTOOL_FEC_NONE; 1918 return 0; 1919 } 1920 if (fec_cfg & BNXT_FEC_AUTONEG) 1921 fec->fec |= ETHTOOL_FEC_AUTO; 1922 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 1923 fec->fec |= ETHTOOL_FEC_BASER; 1924 if (fec_cfg & BNXT_FEC_ENC_RS) 1925 fec->fec |= ETHTOOL_FEC_RS; 1926 if (fec_cfg & BNXT_FEC_ENC_LLRS) 1927 fec->fec |= ETHTOOL_FEC_LLRS; 1928 1929 switch (active_fec) { 1930 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 1931 fec->active_fec |= ETHTOOL_FEC_BASER; 1932 break; 1933 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 1934 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 1935 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 1936 fec->active_fec |= ETHTOOL_FEC_RS; 1937 break; 1938 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 1939 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 1940 fec->active_fec |= ETHTOOL_FEC_LLRS; 1941 break; 1942 } 1943 return 0; 1944 } 1945 1946 static void bnxt_get_fec_stats(struct net_device *dev, 1947 struct ethtool_fec_stats *fec_stats) 1948 { 1949 struct bnxt *bp = netdev_priv(dev); 1950 u64 *rx; 1951 1952 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 1953 return; 1954 1955 rx = bp->rx_port_stats_ext.sw_stats; 1956 fec_stats->corrected_bits.total = 1957 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); 1958 } 1959 1960 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, 1961 u32 fec) 1962 { 1963 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE; 1964 1965 if (fec & ETHTOOL_FEC_BASER) 1966 fw_fec |= BNXT_FEC_BASE_R_ON(link_info); 1967 else if (fec & ETHTOOL_FEC_RS) 1968 fw_fec |= BNXT_FEC_RS_ON(link_info); 1969 else if (fec & ETHTOOL_FEC_LLRS) 1970 fw_fec |= BNXT_FEC_LLRS_ON; 1971 return fw_fec; 1972 } 1973 1974 static int bnxt_set_fecparam(struct net_device *dev, 1975 struct ethtool_fecparam *fecparam) 1976 { 1977 struct hwrm_port_phy_cfg_input req = {0}; 1978 struct bnxt *bp = netdev_priv(dev); 1979 struct bnxt_link_info *link_info; 1980 u32 new_cfg, fec = fecparam->fec; 1981 u16 fec_cfg; 1982 int rc; 1983 1984 link_info = &bp->link_info; 1985 fec_cfg = link_info->fec_cfg; 1986 if (fec_cfg & BNXT_FEC_NONE) 1987 return -EOPNOTSUPP; 1988 1989 if (fec & ETHTOOL_FEC_OFF) { 1990 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE | 1991 BNXT_FEC_ALL_OFF(link_info); 1992 goto apply_fec; 1993 } 1994 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) || 1995 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) || 1996 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) || 1997 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP))) 1998 return -EINVAL; 1999 2000 if (fec & ETHTOOL_FEC_AUTO) { 2001 if (!link_info->autoneg) 2002 return -EINVAL; 2003 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE; 2004 } else { 2005 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec); 2006 } 2007 2008 apply_fec: 2009 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 2010 req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 2011 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2012 /* update current settings */ 2013 if (!rc) { 2014 mutex_lock(&bp->link_lock); 2015 bnxt_update_link(bp, false); 2016 mutex_unlock(&bp->link_lock); 2017 } 2018 return rc; 2019 } 2020 2021 static void bnxt_get_pauseparam(struct net_device *dev, 2022 struct ethtool_pauseparam *epause) 2023 { 2024 struct bnxt *bp = netdev_priv(dev); 2025 struct bnxt_link_info *link_info = &bp->link_info; 2026 2027 if (BNXT_VF(bp)) 2028 return; 2029 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 2030 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX); 2031 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX); 2032 } 2033 2034 static void bnxt_get_pause_stats(struct net_device *dev, 2035 struct ethtool_pause_stats *epstat) 2036 { 2037 struct bnxt *bp = netdev_priv(dev); 2038 u64 *rx, *tx; 2039 2040 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 2041 return; 2042 2043 rx = bp->port_stats.sw_stats; 2044 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 2045 2046 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames); 2047 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames); 2048 } 2049 2050 static int bnxt_set_pauseparam(struct net_device *dev, 2051 struct ethtool_pauseparam *epause) 2052 { 2053 int rc = 0; 2054 struct bnxt *bp = netdev_priv(dev); 2055 struct bnxt_link_info *link_info = &bp->link_info; 2056 2057 if (!BNXT_PHY_CFG_ABLE(bp)) 2058 return -EOPNOTSUPP; 2059 2060 mutex_lock(&bp->link_lock); 2061 if (epause->autoneg) { 2062 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 2063 rc = -EINVAL; 2064 goto pause_exit; 2065 } 2066 2067 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 2068 if (bp->hwrm_spec_code >= 0x10201) 2069 link_info->req_flow_ctrl = 2070 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 2071 } else { 2072 /* when transition from auto pause to force pause, 2073 * force a link change 2074 */ 2075 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 2076 link_info->force_link_chng = true; 2077 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; 2078 link_info->req_flow_ctrl = 0; 2079 } 2080 if (epause->rx_pause) 2081 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; 2082 2083 if (epause->tx_pause) 2084 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; 2085 2086 if (netif_running(dev)) 2087 rc = bnxt_hwrm_set_pause(bp); 2088 2089 pause_exit: 2090 mutex_unlock(&bp->link_lock); 2091 return rc; 2092 } 2093 2094 static u32 bnxt_get_link(struct net_device *dev) 2095 { 2096 struct bnxt *bp = netdev_priv(dev); 2097 2098 /* TODO: handle MF, VF, driver close case */ 2099 return bp->link_info.link_up; 2100 } 2101 2102 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, 2103 struct hwrm_nvm_get_dev_info_output *nvm_dev_info) 2104 { 2105 struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr; 2106 struct hwrm_nvm_get_dev_info_input req = {0}; 2107 int rc; 2108 2109 if (BNXT_VF(bp)) 2110 return -EOPNOTSUPP; 2111 2112 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1); 2113 mutex_lock(&bp->hwrm_cmd_lock); 2114 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2115 if (!rc) 2116 memcpy(nvm_dev_info, resp, sizeof(*resp)); 2117 mutex_unlock(&bp->hwrm_cmd_lock); 2118 return rc; 2119 } 2120 2121 static void bnxt_print_admin_err(struct bnxt *bp) 2122 { 2123 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n"); 2124 } 2125 2126 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 2127 u16 ext, u16 *index, u32 *item_length, 2128 u32 *data_length); 2129 2130 static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type, 2131 u16 dir_ordinal, u16 dir_ext, u16 dir_attr, 2132 u32 dir_item_len, const u8 *data, 2133 size_t data_len) 2134 { 2135 struct bnxt *bp = netdev_priv(dev); 2136 int rc; 2137 struct hwrm_nvm_write_input req = {0}; 2138 dma_addr_t dma_handle; 2139 u8 *kmem = NULL; 2140 2141 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); 2142 2143 req.dir_type = cpu_to_le16(dir_type); 2144 req.dir_ordinal = cpu_to_le16(dir_ordinal); 2145 req.dir_ext = cpu_to_le16(dir_ext); 2146 req.dir_attr = cpu_to_le16(dir_attr); 2147 req.dir_item_length = cpu_to_le32(dir_item_len); 2148 if (data_len && data) { 2149 req.dir_data_length = cpu_to_le32(data_len); 2150 2151 kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, 2152 GFP_KERNEL); 2153 if (!kmem) 2154 return -ENOMEM; 2155 2156 memcpy(kmem, data, data_len); 2157 req.host_src_addr = cpu_to_le64(dma_handle); 2158 } 2159 2160 rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); 2161 if (kmem) 2162 dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); 2163 2164 if (rc == -EACCES) 2165 bnxt_print_admin_err(bp); 2166 return rc; 2167 } 2168 2169 static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, 2170 u16 dir_ordinal, u16 dir_ext, u16 dir_attr, 2171 const u8 *data, size_t data_len) 2172 { 2173 struct bnxt *bp = netdev_priv(dev); 2174 int rc; 2175 2176 mutex_lock(&bp->hwrm_cmd_lock); 2177 rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr, 2178 0, data, data_len); 2179 mutex_unlock(&bp->hwrm_cmd_lock); 2180 return rc; 2181 } 2182 2183 static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, 2184 u8 self_reset, u8 flags) 2185 { 2186 struct hwrm_fw_reset_input req = {0}; 2187 struct bnxt *bp = netdev_priv(dev); 2188 int rc; 2189 2190 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 2191 2192 req.embedded_proc_type = proc_type; 2193 req.selfrst_status = self_reset; 2194 req.flags = flags; 2195 2196 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { 2197 rc = hwrm_send_message_silent(bp, &req, sizeof(req), 2198 HWRM_CMD_TIMEOUT); 2199 } else { 2200 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2201 if (rc == -EACCES) 2202 bnxt_print_admin_err(bp); 2203 } 2204 return rc; 2205 } 2206 2207 static int bnxt_firmware_reset(struct net_device *dev, 2208 enum bnxt_nvm_directory_type dir_type) 2209 { 2210 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; 2211 u8 proc_type, flags = 0; 2212 2213 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ 2214 /* (e.g. when firmware isn't already running) */ 2215 switch (dir_type) { 2216 case BNX_DIR_TYPE_CHIMP_PATCH: 2217 case BNX_DIR_TYPE_BOOTCODE: 2218 case BNX_DIR_TYPE_BOOTCODE_2: 2219 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; 2220 /* Self-reset ChiMP upon next PCIe reset: */ 2221 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 2222 break; 2223 case BNX_DIR_TYPE_APE_FW: 2224 case BNX_DIR_TYPE_APE_PATCH: 2225 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; 2226 /* Self-reset APE upon next PCIe reset: */ 2227 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 2228 break; 2229 case BNX_DIR_TYPE_KONG_FW: 2230 case BNX_DIR_TYPE_KONG_PATCH: 2231 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; 2232 break; 2233 case BNX_DIR_TYPE_BONO_FW: 2234 case BNX_DIR_TYPE_BONO_PATCH: 2235 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; 2236 break; 2237 default: 2238 return -EINVAL; 2239 } 2240 2241 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); 2242 } 2243 2244 static int bnxt_firmware_reset_chip(struct net_device *dev) 2245 { 2246 struct bnxt *bp = netdev_priv(dev); 2247 u8 flags = 0; 2248 2249 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 2250 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 2251 2252 return bnxt_hwrm_firmware_reset(dev, 2253 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, 2254 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, 2255 flags); 2256 } 2257 2258 static int bnxt_firmware_reset_ap(struct net_device *dev) 2259 { 2260 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, 2261 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, 2262 0); 2263 } 2264 2265 static int bnxt_flash_firmware(struct net_device *dev, 2266 u16 dir_type, 2267 const u8 *fw_data, 2268 size_t fw_size) 2269 { 2270 int rc = 0; 2271 u16 code_type; 2272 u32 stored_crc; 2273 u32 calculated_crc; 2274 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; 2275 2276 switch (dir_type) { 2277 case BNX_DIR_TYPE_BOOTCODE: 2278 case BNX_DIR_TYPE_BOOTCODE_2: 2279 code_type = CODE_BOOT; 2280 break; 2281 case BNX_DIR_TYPE_CHIMP_PATCH: 2282 code_type = CODE_CHIMP_PATCH; 2283 break; 2284 case BNX_DIR_TYPE_APE_FW: 2285 code_type = CODE_MCTP_PASSTHRU; 2286 break; 2287 case BNX_DIR_TYPE_APE_PATCH: 2288 code_type = CODE_APE_PATCH; 2289 break; 2290 case BNX_DIR_TYPE_KONG_FW: 2291 code_type = CODE_KONG_FW; 2292 break; 2293 case BNX_DIR_TYPE_KONG_PATCH: 2294 code_type = CODE_KONG_PATCH; 2295 break; 2296 case BNX_DIR_TYPE_BONO_FW: 2297 code_type = CODE_BONO_FW; 2298 break; 2299 case BNX_DIR_TYPE_BONO_PATCH: 2300 code_type = CODE_BONO_PATCH; 2301 break; 2302 default: 2303 netdev_err(dev, "Unsupported directory entry type: %u\n", 2304 dir_type); 2305 return -EINVAL; 2306 } 2307 if (fw_size < sizeof(struct bnxt_fw_header)) { 2308 netdev_err(dev, "Invalid firmware file size: %u\n", 2309 (unsigned int)fw_size); 2310 return -EINVAL; 2311 } 2312 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { 2313 netdev_err(dev, "Invalid firmware signature: %08X\n", 2314 le32_to_cpu(header->signature)); 2315 return -EINVAL; 2316 } 2317 if (header->code_type != code_type) { 2318 netdev_err(dev, "Expected firmware type: %d, read: %d\n", 2319 code_type, header->code_type); 2320 return -EINVAL; 2321 } 2322 if (header->device != DEVICE_CUMULUS_FAMILY) { 2323 netdev_err(dev, "Expected firmware device family %d, read: %d\n", 2324 DEVICE_CUMULUS_FAMILY, header->device); 2325 return -EINVAL; 2326 } 2327 /* Confirm the CRC32 checksum of the file: */ 2328 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 2329 sizeof(stored_crc))); 2330 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 2331 if (calculated_crc != stored_crc) { 2332 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n", 2333 (unsigned long)stored_crc, 2334 (unsigned long)calculated_crc); 2335 return -EINVAL; 2336 } 2337 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2338 0, 0, fw_data, fw_size); 2339 if (rc == 0) /* Firmware update successful */ 2340 rc = bnxt_firmware_reset(dev, dir_type); 2341 2342 return rc; 2343 } 2344 2345 static int bnxt_flash_microcode(struct net_device *dev, 2346 u16 dir_type, 2347 const u8 *fw_data, 2348 size_t fw_size) 2349 { 2350 struct bnxt_ucode_trailer *trailer; 2351 u32 calculated_crc; 2352 u32 stored_crc; 2353 int rc = 0; 2354 2355 if (fw_size < sizeof(struct bnxt_ucode_trailer)) { 2356 netdev_err(dev, "Invalid microcode file size: %u\n", 2357 (unsigned int)fw_size); 2358 return -EINVAL; 2359 } 2360 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size - 2361 sizeof(*trailer))); 2362 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) { 2363 netdev_err(dev, "Invalid microcode trailer signature: %08X\n", 2364 le32_to_cpu(trailer->sig)); 2365 return -EINVAL; 2366 } 2367 if (le16_to_cpu(trailer->dir_type) != dir_type) { 2368 netdev_err(dev, "Expected microcode type: %d, read: %d\n", 2369 dir_type, le16_to_cpu(trailer->dir_type)); 2370 return -EINVAL; 2371 } 2372 if (le16_to_cpu(trailer->trailer_length) < 2373 sizeof(struct bnxt_ucode_trailer)) { 2374 netdev_err(dev, "Invalid microcode trailer length: %d\n", 2375 le16_to_cpu(trailer->trailer_length)); 2376 return -EINVAL; 2377 } 2378 2379 /* Confirm the CRC32 checksum of the file: */ 2380 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 2381 sizeof(stored_crc))); 2382 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 2383 if (calculated_crc != stored_crc) { 2384 netdev_err(dev, 2385 "CRC32 (%08lX) does not match calculated: %08lX\n", 2386 (unsigned long)stored_crc, 2387 (unsigned long)calculated_crc); 2388 return -EINVAL; 2389 } 2390 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2391 0, 0, fw_data, fw_size); 2392 2393 return rc; 2394 } 2395 2396 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) 2397 { 2398 switch (dir_type) { 2399 case BNX_DIR_TYPE_CHIMP_PATCH: 2400 case BNX_DIR_TYPE_BOOTCODE: 2401 case BNX_DIR_TYPE_BOOTCODE_2: 2402 case BNX_DIR_TYPE_APE_FW: 2403 case BNX_DIR_TYPE_APE_PATCH: 2404 case BNX_DIR_TYPE_KONG_FW: 2405 case BNX_DIR_TYPE_KONG_PATCH: 2406 case BNX_DIR_TYPE_BONO_FW: 2407 case BNX_DIR_TYPE_BONO_PATCH: 2408 return true; 2409 } 2410 2411 return false; 2412 } 2413 2414 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type) 2415 { 2416 switch (dir_type) { 2417 case BNX_DIR_TYPE_AVS: 2418 case BNX_DIR_TYPE_EXP_ROM_MBA: 2419 case BNX_DIR_TYPE_PCIE: 2420 case BNX_DIR_TYPE_TSCF_UCODE: 2421 case BNX_DIR_TYPE_EXT_PHY: 2422 case BNX_DIR_TYPE_CCM: 2423 case BNX_DIR_TYPE_ISCSI_BOOT: 2424 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 2425 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 2426 return true; 2427 } 2428 2429 return false; 2430 } 2431 2432 static bool bnxt_dir_type_is_executable(u16 dir_type) 2433 { 2434 return bnxt_dir_type_is_ape_bin_format(dir_type) || 2435 bnxt_dir_type_is_other_exec_format(dir_type); 2436 } 2437 2438 static int bnxt_flash_firmware_from_file(struct net_device *dev, 2439 u16 dir_type, 2440 const char *filename) 2441 { 2442 const struct firmware *fw; 2443 int rc; 2444 2445 rc = request_firmware(&fw, filename, &dev->dev); 2446 if (rc != 0) { 2447 netdev_err(dev, "Error %d requesting firmware file: %s\n", 2448 rc, filename); 2449 return rc; 2450 } 2451 if (bnxt_dir_type_is_ape_bin_format(dir_type)) 2452 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); 2453 else if (bnxt_dir_type_is_other_exec_format(dir_type)) 2454 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); 2455 else 2456 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2457 0, 0, fw->data, fw->size); 2458 release_firmware(fw); 2459 return rc; 2460 } 2461 2462 #define BNXT_PKG_DMA_SIZE 0x40000 2463 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) 2464 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) 2465 2466 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, 2467 u32 install_type) 2468 { 2469 struct hwrm_nvm_install_update_input install = {0}; 2470 struct hwrm_nvm_install_update_output resp = {0}; 2471 struct hwrm_nvm_modify_input modify = {0}; 2472 struct bnxt *bp = netdev_priv(dev); 2473 bool defrag_attempted = false; 2474 dma_addr_t dma_handle; 2475 u8 *kmem = NULL; 2476 u32 modify_len; 2477 u32 item_len; 2478 int rc = 0; 2479 u16 index; 2480 2481 bnxt_hwrm_fw_set_time(bp); 2482 2483 bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); 2484 2485 /* Try allocating a large DMA buffer first. Older fw will 2486 * cause excessive NVRAM erases when using small blocks. 2487 */ 2488 modify_len = roundup_pow_of_two(fw->size); 2489 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); 2490 while (1) { 2491 kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len, 2492 &dma_handle, GFP_KERNEL); 2493 if (!kmem && modify_len > PAGE_SIZE) 2494 modify_len /= 2; 2495 else 2496 break; 2497 } 2498 if (!kmem) 2499 return -ENOMEM; 2500 2501 modify.host_src_addr = cpu_to_le64(dma_handle); 2502 2503 bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); 2504 if ((install_type & 0xffff) == 0) 2505 install_type >>= 16; 2506 install.install_type = cpu_to_le32(install_type); 2507 2508 do { 2509 u32 copied = 0, len = modify_len; 2510 2511 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2512 BNX_DIR_ORDINAL_FIRST, 2513 BNX_DIR_EXT_NONE, 2514 &index, &item_len, NULL); 2515 if (rc) { 2516 netdev_err(dev, "PKG update area not created in nvram\n"); 2517 break; 2518 } 2519 if (fw->size > item_len) { 2520 netdev_err(dev, "PKG insufficient update area in nvram: %lu\n", 2521 (unsigned long)fw->size); 2522 rc = -EFBIG; 2523 break; 2524 } 2525 2526 modify.dir_idx = cpu_to_le16(index); 2527 2528 if (fw->size > modify_len) 2529 modify.flags = BNXT_NVM_MORE_FLAG; 2530 while (copied < fw->size) { 2531 u32 balance = fw->size - copied; 2532 2533 if (balance <= modify_len) { 2534 len = balance; 2535 if (copied) 2536 modify.flags |= BNXT_NVM_LAST_FLAG; 2537 } 2538 memcpy(kmem, fw->data + copied, len); 2539 modify.len = cpu_to_le32(len); 2540 modify.offset = cpu_to_le32(copied); 2541 rc = hwrm_send_message(bp, &modify, sizeof(modify), 2542 FLASH_PACKAGE_TIMEOUT); 2543 if (rc) 2544 goto pkg_abort; 2545 copied += len; 2546 } 2547 mutex_lock(&bp->hwrm_cmd_lock); 2548 rc = _hwrm_send_message_silent(bp, &install, sizeof(install), 2549 INSTALL_PACKAGE_TIMEOUT); 2550 memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); 2551 2552 if (defrag_attempted) { 2553 /* We have tried to defragment already in the previous 2554 * iteration. Return with the result for INSTALL_UPDATE 2555 */ 2556 mutex_unlock(&bp->hwrm_cmd_lock); 2557 break; 2558 } 2559 2560 if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == 2561 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { 2562 install.flags = 2563 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); 2564 2565 rc = _hwrm_send_message_silent(bp, &install, 2566 sizeof(install), 2567 INSTALL_PACKAGE_TIMEOUT); 2568 memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); 2569 2570 if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == 2571 NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { 2572 /* FW has cleared NVM area, driver will create 2573 * UPDATE directory and try the flash again 2574 */ 2575 defrag_attempted = true; 2576 install.flags = 0; 2577 rc = __bnxt_flash_nvram(bp->dev, 2578 BNX_DIR_TYPE_UPDATE, 2579 BNX_DIR_ORDINAL_FIRST, 2580 0, 0, item_len, NULL, 2581 0); 2582 } else if (rc) { 2583 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); 2584 } 2585 } else if (rc) { 2586 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); 2587 } 2588 mutex_unlock(&bp->hwrm_cmd_lock); 2589 } while (defrag_attempted && !rc); 2590 2591 pkg_abort: 2592 dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle); 2593 if (resp.result) { 2594 netdev_err(dev, "PKG install error = %d, problem_item = %d\n", 2595 (s8)resp.result, (int)resp.problem_item); 2596 rc = -ENOPKG; 2597 } 2598 if (rc == -EACCES) 2599 bnxt_print_admin_err(bp); 2600 return rc; 2601 } 2602 2603 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, 2604 u32 install_type) 2605 { 2606 const struct firmware *fw; 2607 int rc; 2608 2609 rc = request_firmware(&fw, filename, &dev->dev); 2610 if (rc != 0) { 2611 netdev_err(dev, "PKG error %d requesting file: %s\n", 2612 rc, filename); 2613 return rc; 2614 } 2615 2616 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type); 2617 2618 release_firmware(fw); 2619 2620 return rc; 2621 } 2622 2623 static int bnxt_flash_device(struct net_device *dev, 2624 struct ethtool_flash *flash) 2625 { 2626 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) { 2627 netdev_err(dev, "flashdev not supported from a virtual function\n"); 2628 return -EINVAL; 2629 } 2630 2631 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS || 2632 flash->region > 0xffff) 2633 return bnxt_flash_package_from_file(dev, flash->data, 2634 flash->region); 2635 2636 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); 2637 } 2638 2639 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) 2640 { 2641 struct bnxt *bp = netdev_priv(dev); 2642 int rc; 2643 struct hwrm_nvm_get_dir_info_input req = {0}; 2644 struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; 2645 2646 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); 2647 2648 mutex_lock(&bp->hwrm_cmd_lock); 2649 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2650 if (!rc) { 2651 *entries = le32_to_cpu(output->entries); 2652 *length = le32_to_cpu(output->entry_length); 2653 } 2654 mutex_unlock(&bp->hwrm_cmd_lock); 2655 return rc; 2656 } 2657 2658 static int bnxt_get_eeprom_len(struct net_device *dev) 2659 { 2660 struct bnxt *bp = netdev_priv(dev); 2661 2662 if (BNXT_VF(bp)) 2663 return 0; 2664 2665 /* The -1 return value allows the entire 32-bit range of offsets to be 2666 * passed via the ethtool command-line utility. 2667 */ 2668 return -1; 2669 } 2670 2671 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) 2672 { 2673 struct bnxt *bp = netdev_priv(dev); 2674 int rc; 2675 u32 dir_entries; 2676 u32 entry_length; 2677 u8 *buf; 2678 size_t buflen; 2679 dma_addr_t dma_handle; 2680 struct hwrm_nvm_get_dir_entries_input req = {0}; 2681 2682 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); 2683 if (rc != 0) 2684 return rc; 2685 2686 if (!dir_entries || !entry_length) 2687 return -EIO; 2688 2689 /* Insert 2 bytes of directory info (count and size of entries) */ 2690 if (len < 2) 2691 return -EINVAL; 2692 2693 *data++ = dir_entries; 2694 *data++ = entry_length; 2695 len -= 2; 2696 memset(data, 0xff, len); 2697 2698 buflen = dir_entries * entry_length; 2699 buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, 2700 GFP_KERNEL); 2701 if (!buf) { 2702 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", 2703 (unsigned)buflen); 2704 return -ENOMEM; 2705 } 2706 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); 2707 req.host_dest_addr = cpu_to_le64(dma_handle); 2708 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2709 if (rc == 0) 2710 memcpy(data, buf, len > buflen ? buflen : len); 2711 dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); 2712 return rc; 2713 } 2714 2715 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, 2716 u32 length, u8 *data) 2717 { 2718 struct bnxt *bp = netdev_priv(dev); 2719 int rc; 2720 u8 *buf; 2721 dma_addr_t dma_handle; 2722 struct hwrm_nvm_read_input req = {0}; 2723 2724 if (!length) 2725 return -EINVAL; 2726 2727 buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, 2728 GFP_KERNEL); 2729 if (!buf) { 2730 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", 2731 (unsigned)length); 2732 return -ENOMEM; 2733 } 2734 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); 2735 req.host_dest_addr = cpu_to_le64(dma_handle); 2736 req.dir_idx = cpu_to_le16(index); 2737 req.offset = cpu_to_le32(offset); 2738 req.len = cpu_to_le32(length); 2739 2740 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2741 if (rc == 0) 2742 memcpy(data, buf, length); 2743 dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); 2744 return rc; 2745 } 2746 2747 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 2748 u16 ext, u16 *index, u32 *item_length, 2749 u32 *data_length) 2750 { 2751 struct bnxt *bp = netdev_priv(dev); 2752 int rc; 2753 struct hwrm_nvm_find_dir_entry_input req = {0}; 2754 struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; 2755 2756 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); 2757 req.enables = 0; 2758 req.dir_idx = 0; 2759 req.dir_type = cpu_to_le16(type); 2760 req.dir_ordinal = cpu_to_le16(ordinal); 2761 req.dir_ext = cpu_to_le16(ext); 2762 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 2763 mutex_lock(&bp->hwrm_cmd_lock); 2764 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2765 if (rc == 0) { 2766 if (index) 2767 *index = le16_to_cpu(output->dir_idx); 2768 if (item_length) 2769 *item_length = le32_to_cpu(output->dir_item_length); 2770 if (data_length) 2771 *data_length = le32_to_cpu(output->dir_data_length); 2772 } 2773 mutex_unlock(&bp->hwrm_cmd_lock); 2774 return rc; 2775 } 2776 2777 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) 2778 { 2779 char *retval = NULL; 2780 char *p; 2781 char *value; 2782 int field = 0; 2783 2784 if (datalen < 1) 2785 return NULL; 2786 /* null-terminate the log data (removing last '\n'): */ 2787 data[datalen - 1] = 0; 2788 for (p = data; *p != 0; p++) { 2789 field = 0; 2790 retval = NULL; 2791 while (*p != 0 && *p != '\n') { 2792 value = p; 2793 while (*p != 0 && *p != '\t' && *p != '\n') 2794 p++; 2795 if (field == desired_field) 2796 retval = value; 2797 if (*p != '\t') 2798 break; 2799 *p = 0; 2800 field++; 2801 p++; 2802 } 2803 if (*p == 0) 2804 break; 2805 *p = 0; 2806 } 2807 return retval; 2808 } 2809 2810 static void bnxt_get_pkgver(struct net_device *dev) 2811 { 2812 struct bnxt *bp = netdev_priv(dev); 2813 u16 index = 0; 2814 char *pkgver; 2815 u32 pkglen; 2816 u8 *pkgbuf; 2817 int len; 2818 2819 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 2820 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2821 &index, NULL, &pkglen) != 0) 2822 return; 2823 2824 pkgbuf = kzalloc(pkglen, GFP_KERNEL); 2825 if (!pkgbuf) { 2826 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", 2827 pkglen); 2828 return; 2829 } 2830 2831 if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) 2832 goto err; 2833 2834 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, 2835 pkglen); 2836 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { 2837 len = strlen(bp->fw_ver_str); 2838 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, 2839 "/pkg %s", pkgver); 2840 } 2841 err: 2842 kfree(pkgbuf); 2843 } 2844 2845 static int bnxt_get_eeprom(struct net_device *dev, 2846 struct ethtool_eeprom *eeprom, 2847 u8 *data) 2848 { 2849 u32 index; 2850 u32 offset; 2851 2852 if (eeprom->offset == 0) /* special offset value to get directory */ 2853 return bnxt_get_nvram_directory(dev, eeprom->len, data); 2854 2855 index = eeprom->offset >> 24; 2856 offset = eeprom->offset & 0xffffff; 2857 2858 if (index == 0) { 2859 netdev_err(dev, "unsupported index value: %d\n", index); 2860 return -EINVAL; 2861 } 2862 2863 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); 2864 } 2865 2866 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) 2867 { 2868 struct bnxt *bp = netdev_priv(dev); 2869 struct hwrm_nvm_erase_dir_entry_input req = {0}; 2870 2871 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); 2872 req.dir_idx = cpu_to_le16(index); 2873 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2874 } 2875 2876 static int bnxt_set_eeprom(struct net_device *dev, 2877 struct ethtool_eeprom *eeprom, 2878 u8 *data) 2879 { 2880 struct bnxt *bp = netdev_priv(dev); 2881 u8 index, dir_op; 2882 u16 type, ext, ordinal, attr; 2883 2884 if (!BNXT_PF(bp)) { 2885 netdev_err(dev, "NVM write not supported from a virtual function\n"); 2886 return -EINVAL; 2887 } 2888 2889 type = eeprom->magic >> 16; 2890 2891 if (type == 0xffff) { /* special value for directory operations */ 2892 index = eeprom->magic & 0xff; 2893 dir_op = eeprom->magic >> 8; 2894 if (index == 0) 2895 return -EINVAL; 2896 switch (dir_op) { 2897 case 0x0e: /* erase */ 2898 if (eeprom->offset != ~eeprom->magic) 2899 return -EINVAL; 2900 return bnxt_erase_nvram_directory(dev, index - 1); 2901 default: 2902 return -EINVAL; 2903 } 2904 } 2905 2906 /* Create or re-write an NVM item: */ 2907 if (bnxt_dir_type_is_executable(type)) 2908 return -EOPNOTSUPP; 2909 ext = eeprom->magic & 0xffff; 2910 ordinal = eeprom->offset >> 16; 2911 attr = eeprom->offset & 0xffff; 2912 2913 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, 2914 eeprom->len); 2915 } 2916 2917 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) 2918 { 2919 struct bnxt *bp = netdev_priv(dev); 2920 struct ethtool_eee *eee = &bp->eee; 2921 struct bnxt_link_info *link_info = &bp->link_info; 2922 u32 advertising; 2923 int rc = 0; 2924 2925 if (!BNXT_PHY_CFG_ABLE(bp)) 2926 return -EOPNOTSUPP; 2927 2928 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 2929 return -EOPNOTSUPP; 2930 2931 mutex_lock(&bp->link_lock); 2932 advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 2933 if (!edata->eee_enabled) 2934 goto eee_ok; 2935 2936 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 2937 netdev_warn(dev, "EEE requires autoneg\n"); 2938 rc = -EINVAL; 2939 goto eee_exit; 2940 } 2941 if (edata->tx_lpi_enabled) { 2942 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || 2943 edata->tx_lpi_timer < bp->lpi_tmr_lo)) { 2944 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", 2945 bp->lpi_tmr_lo, bp->lpi_tmr_hi); 2946 rc = -EINVAL; 2947 goto eee_exit; 2948 } else if (!bp->lpi_tmr_hi) { 2949 edata->tx_lpi_timer = eee->tx_lpi_timer; 2950 } 2951 } 2952 if (!edata->advertised) { 2953 edata->advertised = advertising & eee->supported; 2954 } else if (edata->advertised & ~advertising) { 2955 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", 2956 edata->advertised, advertising); 2957 rc = -EINVAL; 2958 goto eee_exit; 2959 } 2960 2961 eee->advertised = edata->advertised; 2962 eee->tx_lpi_enabled = edata->tx_lpi_enabled; 2963 eee->tx_lpi_timer = edata->tx_lpi_timer; 2964 eee_ok: 2965 eee->eee_enabled = edata->eee_enabled; 2966 2967 if (netif_running(dev)) 2968 rc = bnxt_hwrm_set_link_setting(bp, false, true); 2969 2970 eee_exit: 2971 mutex_unlock(&bp->link_lock); 2972 return rc; 2973 } 2974 2975 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) 2976 { 2977 struct bnxt *bp = netdev_priv(dev); 2978 2979 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 2980 return -EOPNOTSUPP; 2981 2982 *edata = bp->eee; 2983 if (!bp->eee.eee_enabled) { 2984 /* Preserve tx_lpi_timer so that the last value will be used 2985 * by default when it is re-enabled. 2986 */ 2987 edata->advertised = 0; 2988 edata->tx_lpi_enabled = 0; 2989 } 2990 2991 if (!bp->eee.eee_active) 2992 edata->lp_advertised = 0; 2993 2994 return 0; 2995 } 2996 2997 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, 2998 u16 page_number, u16 start_addr, 2999 u16 data_length, u8 *buf) 3000 { 3001 struct hwrm_port_phy_i2c_read_input req = {0}; 3002 struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; 3003 int rc, byte_offset = 0; 3004 3005 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); 3006 req.i2c_slave_addr = i2c_addr; 3007 req.page_number = cpu_to_le16(page_number); 3008 req.port_id = cpu_to_le16(bp->pf.port_id); 3009 do { 3010 u16 xfer_size; 3011 3012 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); 3013 data_length -= xfer_size; 3014 req.page_offset = cpu_to_le16(start_addr + byte_offset); 3015 req.data_length = xfer_size; 3016 req.enables = cpu_to_le32(start_addr + byte_offset ? 3017 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); 3018 mutex_lock(&bp->hwrm_cmd_lock); 3019 rc = _hwrm_send_message(bp, &req, sizeof(req), 3020 HWRM_CMD_TIMEOUT); 3021 if (!rc) 3022 memcpy(buf + byte_offset, output->data, xfer_size); 3023 mutex_unlock(&bp->hwrm_cmd_lock); 3024 byte_offset += xfer_size; 3025 } while (!rc && data_length > 0); 3026 3027 return rc; 3028 } 3029 3030 static int bnxt_get_module_info(struct net_device *dev, 3031 struct ethtool_modinfo *modinfo) 3032 { 3033 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; 3034 struct bnxt *bp = netdev_priv(dev); 3035 int rc; 3036 3037 /* No point in going further if phy status indicates 3038 * module is not inserted or if it is powered down or 3039 * if it is of type 10GBase-T 3040 */ 3041 if (bp->link_info.module_status > 3042 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 3043 return -EOPNOTSUPP; 3044 3045 /* This feature is not supported in older firmware versions */ 3046 if (bp->hwrm_spec_code < 0x10202) 3047 return -EOPNOTSUPP; 3048 3049 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3050 SFF_DIAG_SUPPORT_OFFSET + 1, 3051 data); 3052 if (!rc) { 3053 u8 module_id = data[0]; 3054 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; 3055 3056 switch (module_id) { 3057 case SFF_MODULE_ID_SFP: 3058 modinfo->type = ETH_MODULE_SFF_8472; 3059 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 3060 if (!diag_supported) 3061 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 3062 break; 3063 case SFF_MODULE_ID_QSFP: 3064 case SFF_MODULE_ID_QSFP_PLUS: 3065 modinfo->type = ETH_MODULE_SFF_8436; 3066 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 3067 break; 3068 case SFF_MODULE_ID_QSFP28: 3069 modinfo->type = ETH_MODULE_SFF_8636; 3070 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 3071 break; 3072 default: 3073 rc = -EOPNOTSUPP; 3074 break; 3075 } 3076 } 3077 return rc; 3078 } 3079 3080 static int bnxt_get_module_eeprom(struct net_device *dev, 3081 struct ethtool_eeprom *eeprom, 3082 u8 *data) 3083 { 3084 struct bnxt *bp = netdev_priv(dev); 3085 u16 start = eeprom->offset, length = eeprom->len; 3086 int rc = 0; 3087 3088 memset(data, 0, eeprom->len); 3089 3090 /* Read A0 portion of the EEPROM */ 3091 if (start < ETH_MODULE_SFF_8436_LEN) { 3092 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) 3093 length = ETH_MODULE_SFF_8436_LEN - start; 3094 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3095 start, length, data); 3096 if (rc) 3097 return rc; 3098 start += length; 3099 data += length; 3100 length = eeprom->len - length; 3101 } 3102 3103 /* Read A2 portion of the EEPROM */ 3104 if (length) { 3105 start -= ETH_MODULE_SFF_8436_LEN; 3106 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 3107 start, length, data); 3108 } 3109 return rc; 3110 } 3111 3112 static int bnxt_nway_reset(struct net_device *dev) 3113 { 3114 int rc = 0; 3115 3116 struct bnxt *bp = netdev_priv(dev); 3117 struct bnxt_link_info *link_info = &bp->link_info; 3118 3119 if (!BNXT_PHY_CFG_ABLE(bp)) 3120 return -EOPNOTSUPP; 3121 3122 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) 3123 return -EINVAL; 3124 3125 if (netif_running(dev)) 3126 rc = bnxt_hwrm_set_link_setting(bp, true, false); 3127 3128 return rc; 3129 } 3130 3131 static int bnxt_set_phys_id(struct net_device *dev, 3132 enum ethtool_phys_id_state state) 3133 { 3134 struct hwrm_port_led_cfg_input req = {0}; 3135 struct bnxt *bp = netdev_priv(dev); 3136 struct bnxt_pf_info *pf = &bp->pf; 3137 struct bnxt_led_cfg *led_cfg; 3138 u8 led_state; 3139 __le16 duration; 3140 int i; 3141 3142 if (!bp->num_leds || BNXT_VF(bp)) 3143 return -EOPNOTSUPP; 3144 3145 if (state == ETHTOOL_ID_ACTIVE) { 3146 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; 3147 duration = cpu_to_le16(500); 3148 } else if (state == ETHTOOL_ID_INACTIVE) { 3149 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; 3150 duration = cpu_to_le16(0); 3151 } else { 3152 return -EINVAL; 3153 } 3154 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); 3155 req.port_id = cpu_to_le16(pf->port_id); 3156 req.num_leds = bp->num_leds; 3157 led_cfg = (struct bnxt_led_cfg *)&req.led0_id; 3158 for (i = 0; i < bp->num_leds; i++, led_cfg++) { 3159 req.enables |= BNXT_LED_DFLT_ENABLES(i); 3160 led_cfg->led_id = bp->leds[i].led_id; 3161 led_cfg->led_state = led_state; 3162 led_cfg->led_blink_on = duration; 3163 led_cfg->led_blink_off = duration; 3164 led_cfg->led_group_id = bp->leds[i].led_group_id; 3165 } 3166 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3167 } 3168 3169 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) 3170 { 3171 struct hwrm_selftest_irq_input req = {0}; 3172 3173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1); 3174 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3175 } 3176 3177 static int bnxt_test_irq(struct bnxt *bp) 3178 { 3179 int i; 3180 3181 for (i = 0; i < bp->cp_nr_rings; i++) { 3182 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id; 3183 int rc; 3184 3185 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring); 3186 if (rc) 3187 return rc; 3188 } 3189 return 0; 3190 } 3191 3192 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) 3193 { 3194 struct hwrm_port_mac_cfg_input req = {0}; 3195 3196 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); 3197 3198 req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); 3199 if (enable) 3200 req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; 3201 else 3202 req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; 3203 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3204 } 3205 3206 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) 3207 { 3208 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 3209 struct hwrm_port_phy_qcaps_input req = {0}; 3210 int rc; 3211 3212 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 3213 mutex_lock(&bp->hwrm_cmd_lock); 3214 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3215 if (!rc) 3216 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); 3217 3218 mutex_unlock(&bp->hwrm_cmd_lock); 3219 return rc; 3220 } 3221 3222 static int bnxt_disable_an_for_lpbk(struct bnxt *bp, 3223 struct hwrm_port_phy_cfg_input *req) 3224 { 3225 struct bnxt_link_info *link_info = &bp->link_info; 3226 u16 fw_advertising; 3227 u16 fw_speed; 3228 int rc; 3229 3230 if (!link_info->autoneg || 3231 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) 3232 return 0; 3233 3234 rc = bnxt_query_force_speeds(bp, &fw_advertising); 3235 if (rc) 3236 return rc; 3237 3238 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 3239 if (bp->link_info.link_up) 3240 fw_speed = bp->link_info.link_speed; 3241 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) 3242 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 3243 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB) 3244 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 3245 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB) 3246 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 3247 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB) 3248 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 3249 3250 req->force_link_speed = cpu_to_le16(fw_speed); 3251 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | 3252 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 3253 rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); 3254 req->flags = 0; 3255 req->force_link_speed = cpu_to_le16(0); 3256 return rc; 3257 } 3258 3259 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) 3260 { 3261 struct hwrm_port_phy_cfg_input req = {0}; 3262 3263 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 3264 3265 if (enable) { 3266 bnxt_disable_an_for_lpbk(bp, &req); 3267 if (ext) 3268 req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; 3269 else 3270 req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; 3271 } else { 3272 req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; 3273 } 3274 req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); 3275 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3276 } 3277 3278 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3279 u32 raw_cons, int pkt_size) 3280 { 3281 struct bnxt_napi *bnapi = cpr->bnapi; 3282 struct bnxt_rx_ring_info *rxr; 3283 struct bnxt_sw_rx_bd *rx_buf; 3284 struct rx_cmp *rxcmp; 3285 u16 cp_cons, cons; 3286 u8 *data; 3287 u32 len; 3288 int i; 3289 3290 rxr = bnapi->rx_ring; 3291 cp_cons = RING_CMP(raw_cons); 3292 rxcmp = (struct rx_cmp *) 3293 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3294 cons = rxcmp->rx_cmp_opaque; 3295 rx_buf = &rxr->rx_buf_ring[cons]; 3296 data = rx_buf->data_ptr; 3297 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 3298 if (len != pkt_size) 3299 return -EIO; 3300 i = ETH_ALEN; 3301 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr)) 3302 return -EIO; 3303 i += ETH_ALEN; 3304 for ( ; i < pkt_size; i++) { 3305 if (data[i] != (u8)(i & 0xff)) 3306 return -EIO; 3307 } 3308 return 0; 3309 } 3310 3311 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3312 int pkt_size) 3313 { 3314 struct tx_cmp *txcmp; 3315 int rc = -EIO; 3316 u32 raw_cons; 3317 u32 cons; 3318 int i; 3319 3320 raw_cons = cpr->cp_raw_cons; 3321 for (i = 0; i < 200; i++) { 3322 cons = RING_CMP(raw_cons); 3323 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3324 3325 if (!TX_CMP_VALID(txcmp, raw_cons)) { 3326 udelay(5); 3327 continue; 3328 } 3329 3330 /* The valid test of the entry must be done first before 3331 * reading any further. 3332 */ 3333 dma_rmb(); 3334 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) { 3335 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); 3336 raw_cons = NEXT_RAW_CMP(raw_cons); 3337 raw_cons = NEXT_RAW_CMP(raw_cons); 3338 break; 3339 } 3340 raw_cons = NEXT_RAW_CMP(raw_cons); 3341 } 3342 cpr->cp_raw_cons = raw_cons; 3343 return rc; 3344 } 3345 3346 static int bnxt_run_loopback(struct bnxt *bp) 3347 { 3348 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; 3349 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 3350 struct bnxt_cp_ring_info *cpr; 3351 int pkt_size, i = 0; 3352 struct sk_buff *skb; 3353 dma_addr_t map; 3354 u8 *data; 3355 int rc; 3356 3357 cpr = &rxr->bnapi->cp_ring; 3358 if (bp->flags & BNXT_FLAG_CHIP_P5) 3359 cpr = cpr->cp_ring_arr[BNXT_RX_HDL]; 3360 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); 3361 skb = netdev_alloc_skb(bp->dev, pkt_size); 3362 if (!skb) 3363 return -ENOMEM; 3364 data = skb_put(skb, pkt_size); 3365 eth_broadcast_addr(data); 3366 i += ETH_ALEN; 3367 ether_addr_copy(&data[i], bp->dev->dev_addr); 3368 i += ETH_ALEN; 3369 for ( ; i < pkt_size; i++) 3370 data[i] = (u8)(i & 0xff); 3371 3372 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, 3373 DMA_TO_DEVICE); 3374 if (dma_mapping_error(&bp->pdev->dev, map)) { 3375 dev_kfree_skb(skb); 3376 return -EIO; 3377 } 3378 bnxt_xmit_bd(bp, txr, map, pkt_size); 3379 3380 /* Sync BD data before updating doorbell */ 3381 wmb(); 3382 3383 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); 3384 rc = bnxt_poll_loopback(bp, cpr, pkt_size); 3385 3386 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); 3387 dev_kfree_skb(skb); 3388 return rc; 3389 } 3390 3391 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) 3392 { 3393 struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr; 3394 struct hwrm_selftest_exec_input req = {0}; 3395 int rc; 3396 3397 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1); 3398 mutex_lock(&bp->hwrm_cmd_lock); 3399 resp->test_success = 0; 3400 req.flags = test_mask; 3401 rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout); 3402 *test_results = resp->test_success; 3403 mutex_unlock(&bp->hwrm_cmd_lock); 3404 return rc; 3405 } 3406 3407 #define BNXT_DRV_TESTS 4 3408 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) 3409 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) 3410 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) 3411 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) 3412 3413 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, 3414 u64 *buf) 3415 { 3416 struct bnxt *bp = netdev_priv(dev); 3417 bool do_ext_lpbk = false; 3418 bool offline = false; 3419 u8 test_results = 0; 3420 u8 test_mask = 0; 3421 int rc = 0, i; 3422 3423 if (!bp->num_tests || !BNXT_PF(bp)) 3424 return; 3425 memset(buf, 0, sizeof(u64) * bp->num_tests); 3426 if (!netif_running(dev)) { 3427 etest->flags |= ETH_TEST_FL_FAILED; 3428 return; 3429 } 3430 3431 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && 3432 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) 3433 do_ext_lpbk = true; 3434 3435 if (etest->flags & ETH_TEST_FL_OFFLINE) { 3436 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) { 3437 etest->flags |= ETH_TEST_FL_FAILED; 3438 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n"); 3439 return; 3440 } 3441 offline = true; 3442 } 3443 3444 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 3445 u8 bit_val = 1 << i; 3446 3447 if (!(bp->test_info->offline_mask & bit_val)) 3448 test_mask |= bit_val; 3449 else if (offline) 3450 test_mask |= bit_val; 3451 } 3452 if (!offline) { 3453 bnxt_run_fw_tests(bp, test_mask, &test_results); 3454 } else { 3455 rc = bnxt_close_nic(bp, false, false); 3456 if (rc) 3457 return; 3458 bnxt_run_fw_tests(bp, test_mask, &test_results); 3459 3460 buf[BNXT_MACLPBK_TEST_IDX] = 1; 3461 bnxt_hwrm_mac_loopback(bp, true); 3462 msleep(250); 3463 rc = bnxt_half_open_nic(bp); 3464 if (rc) { 3465 bnxt_hwrm_mac_loopback(bp, false); 3466 etest->flags |= ETH_TEST_FL_FAILED; 3467 return; 3468 } 3469 if (bnxt_run_loopback(bp)) 3470 etest->flags |= ETH_TEST_FL_FAILED; 3471 else 3472 buf[BNXT_MACLPBK_TEST_IDX] = 0; 3473 3474 bnxt_hwrm_mac_loopback(bp, false); 3475 bnxt_hwrm_phy_loopback(bp, true, false); 3476 msleep(1000); 3477 if (bnxt_run_loopback(bp)) { 3478 buf[BNXT_PHYLPBK_TEST_IDX] = 1; 3479 etest->flags |= ETH_TEST_FL_FAILED; 3480 } 3481 if (do_ext_lpbk) { 3482 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 3483 bnxt_hwrm_phy_loopback(bp, true, true); 3484 msleep(1000); 3485 if (bnxt_run_loopback(bp)) { 3486 buf[BNXT_EXTLPBK_TEST_IDX] = 1; 3487 etest->flags |= ETH_TEST_FL_FAILED; 3488 } 3489 } 3490 bnxt_hwrm_phy_loopback(bp, false, false); 3491 bnxt_half_close_nic(bp); 3492 rc = bnxt_open_nic(bp, false, true); 3493 } 3494 if (rc || bnxt_test_irq(bp)) { 3495 buf[BNXT_IRQ_TEST_IDX] = 1; 3496 etest->flags |= ETH_TEST_FL_FAILED; 3497 } 3498 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 3499 u8 bit_val = 1 << i; 3500 3501 if ((test_mask & bit_val) && !(test_results & bit_val)) { 3502 buf[i] = 1; 3503 etest->flags |= ETH_TEST_FL_FAILED; 3504 } 3505 } 3506 } 3507 3508 static int bnxt_reset(struct net_device *dev, u32 *flags) 3509 { 3510 struct bnxt *bp = netdev_priv(dev); 3511 bool reload = false; 3512 u32 req = *flags; 3513 3514 if (!req) 3515 return -EINVAL; 3516 3517 if (!BNXT_PF(bp)) { 3518 netdev_err(dev, "Reset is not supported from a VF\n"); 3519 return -EOPNOTSUPP; 3520 } 3521 3522 if (pci_vfs_assigned(bp->pdev) && 3523 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { 3524 netdev_err(dev, 3525 "Reset not allowed when VFs are assigned to VMs\n"); 3526 return -EBUSY; 3527 } 3528 3529 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { 3530 /* This feature is not supported in older firmware versions */ 3531 if (bp->hwrm_spec_code >= 0x10803) { 3532 if (!bnxt_firmware_reset_chip(dev)) { 3533 netdev_info(dev, "Firmware reset request successful.\n"); 3534 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) 3535 reload = true; 3536 *flags &= ~BNXT_FW_RESET_CHIP; 3537 } 3538 } else if (req == BNXT_FW_RESET_CHIP) { 3539 return -EOPNOTSUPP; /* only request, fail hard */ 3540 } 3541 } 3542 3543 if (req & BNXT_FW_RESET_AP) { 3544 /* This feature is not supported in older firmware versions */ 3545 if (bp->hwrm_spec_code >= 0x10803) { 3546 if (!bnxt_firmware_reset_ap(dev)) { 3547 netdev_info(dev, "Reset application processor successful.\n"); 3548 reload = true; 3549 *flags &= ~BNXT_FW_RESET_AP; 3550 } 3551 } else if (req == BNXT_FW_RESET_AP) { 3552 return -EOPNOTSUPP; /* only request, fail hard */ 3553 } 3554 } 3555 3556 if (reload) 3557 netdev_info(dev, "Reload driver to complete reset\n"); 3558 3559 return 0; 3560 } 3561 3562 static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, 3563 struct bnxt_hwrm_dbg_dma_info *info) 3564 { 3565 struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr; 3566 struct hwrm_dbg_cmn_input *cmn_req = msg; 3567 __le16 *seq_ptr = msg + info->seq_off; 3568 u16 seq = 0, len, segs_off; 3569 void *resp = cmn_resp; 3570 dma_addr_t dma_handle; 3571 int rc, off = 0; 3572 void *dma_buf; 3573 3574 dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle, 3575 GFP_KERNEL); 3576 if (!dma_buf) 3577 return -ENOMEM; 3578 3579 segs_off = offsetof(struct hwrm_dbg_coredump_list_output, 3580 total_segments); 3581 cmn_req->host_dest_addr = cpu_to_le64(dma_handle); 3582 cmn_req->host_buf_len = cpu_to_le32(info->dma_len); 3583 mutex_lock(&bp->hwrm_cmd_lock); 3584 while (1) { 3585 *seq_ptr = cpu_to_le16(seq); 3586 rc = _hwrm_send_message(bp, msg, msg_len, 3587 HWRM_COREDUMP_TIMEOUT); 3588 if (rc) 3589 break; 3590 3591 len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); 3592 if (!seq && 3593 cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { 3594 info->segs = le16_to_cpu(*((__le16 *)(resp + 3595 segs_off))); 3596 if (!info->segs) { 3597 rc = -EIO; 3598 break; 3599 } 3600 3601 info->dest_buf_size = info->segs * 3602 sizeof(struct coredump_segment_record); 3603 info->dest_buf = kmalloc(info->dest_buf_size, 3604 GFP_KERNEL); 3605 if (!info->dest_buf) { 3606 rc = -ENOMEM; 3607 break; 3608 } 3609 } 3610 3611 if (info->dest_buf) { 3612 if ((info->seg_start + off + len) <= 3613 BNXT_COREDUMP_BUF_LEN(info->buf_len)) { 3614 memcpy(info->dest_buf + off, dma_buf, len); 3615 } else { 3616 rc = -ENOBUFS; 3617 break; 3618 } 3619 } 3620 3621 if (cmn_req->req_type == 3622 cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) 3623 info->dest_buf_size += len; 3624 3625 if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) 3626 break; 3627 3628 seq++; 3629 off += len; 3630 } 3631 mutex_unlock(&bp->hwrm_cmd_lock); 3632 dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); 3633 return rc; 3634 } 3635 3636 static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, 3637 struct bnxt_coredump *coredump) 3638 { 3639 struct hwrm_dbg_coredump_list_input req = {0}; 3640 struct bnxt_hwrm_dbg_dma_info info = {NULL}; 3641 int rc; 3642 3643 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); 3644 3645 info.dma_len = COREDUMP_LIST_BUF_LEN; 3646 info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); 3647 info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, 3648 data_len); 3649 3650 rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); 3651 if (!rc) { 3652 coredump->data = info.dest_buf; 3653 coredump->data_size = info.dest_buf_size; 3654 coredump->total_segs = info.segs; 3655 } 3656 return rc; 3657 } 3658 3659 static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, 3660 u16 segment_id) 3661 { 3662 struct hwrm_dbg_coredump_initiate_input req = {0}; 3663 3664 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); 3665 req.component_id = cpu_to_le16(component_id); 3666 req.segment_id = cpu_to_le16(segment_id); 3667 3668 return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT); 3669 } 3670 3671 static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, 3672 u16 segment_id, u32 *seg_len, 3673 void *buf, u32 buf_len, u32 offset) 3674 { 3675 struct hwrm_dbg_coredump_retrieve_input req = {0}; 3676 struct bnxt_hwrm_dbg_dma_info info = {NULL}; 3677 int rc; 3678 3679 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); 3680 req.component_id = cpu_to_le16(component_id); 3681 req.segment_id = cpu_to_le16(segment_id); 3682 3683 info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; 3684 info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, 3685 seq_no); 3686 info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, 3687 data_len); 3688 if (buf) { 3689 info.dest_buf = buf + offset; 3690 info.buf_len = buf_len; 3691 info.seg_start = offset; 3692 } 3693 3694 rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); 3695 if (!rc) 3696 *seg_len = info.dest_buf_size; 3697 3698 return rc; 3699 } 3700 3701 static void 3702 bnxt_fill_coredump_seg_hdr(struct bnxt *bp, 3703 struct bnxt_coredump_segment_hdr *seg_hdr, 3704 struct coredump_segment_record *seg_rec, u32 seg_len, 3705 int status, u32 duration, u32 instance) 3706 { 3707 memset(seg_hdr, 0, sizeof(*seg_hdr)); 3708 memcpy(seg_hdr->signature, "sEgM", 4); 3709 if (seg_rec) { 3710 seg_hdr->component_id = (__force __le32)seg_rec->component_id; 3711 seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; 3712 seg_hdr->low_version = seg_rec->version_low; 3713 seg_hdr->high_version = seg_rec->version_hi; 3714 } else { 3715 /* For hwrm_ver_get response Component id = 2 3716 * and Segment id = 0 3717 */ 3718 seg_hdr->component_id = cpu_to_le32(2); 3719 seg_hdr->segment_id = 0; 3720 } 3721 seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn); 3722 seg_hdr->length = cpu_to_le32(seg_len); 3723 seg_hdr->status = cpu_to_le32(status); 3724 seg_hdr->duration = cpu_to_le32(duration); 3725 seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); 3726 seg_hdr->instance = cpu_to_le32(instance); 3727 } 3728 3729 static void 3730 bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, 3731 time64_t start, s16 start_utc, u16 total_segs, 3732 int status) 3733 { 3734 time64_t end = ktime_get_real_seconds(); 3735 u32 os_ver_major = 0, os_ver_minor = 0; 3736 struct tm tm; 3737 3738 time64_to_tm(start, 0, &tm); 3739 memset(record, 0, sizeof(*record)); 3740 memcpy(record->signature, "cOrE", 4); 3741 record->flags = 0; 3742 record->low_version = 0; 3743 record->high_version = 1; 3744 record->asic_state = 0; 3745 strlcpy(record->system_name, utsname()->nodename, 3746 sizeof(record->system_name)); 3747 record->year = cpu_to_le16(tm.tm_year + 1900); 3748 record->month = cpu_to_le16(tm.tm_mon + 1); 3749 record->day = cpu_to_le16(tm.tm_mday); 3750 record->hour = cpu_to_le16(tm.tm_hour); 3751 record->minute = cpu_to_le16(tm.tm_min); 3752 record->second = cpu_to_le16(tm.tm_sec); 3753 record->utc_bias = cpu_to_le16(start_utc); 3754 strcpy(record->commandline, "ethtool -w"); 3755 record->total_segments = cpu_to_le32(total_segs); 3756 3757 sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor); 3758 record->os_ver_major = cpu_to_le32(os_ver_major); 3759 record->os_ver_minor = cpu_to_le32(os_ver_minor); 3760 3761 strlcpy(record->os_name, utsname()->sysname, 32); 3762 time64_to_tm(end, 0, &tm); 3763 record->end_year = cpu_to_le16(tm.tm_year + 1900); 3764 record->end_month = cpu_to_le16(tm.tm_mon + 1); 3765 record->end_day = cpu_to_le16(tm.tm_mday); 3766 record->end_hour = cpu_to_le16(tm.tm_hour); 3767 record->end_minute = cpu_to_le16(tm.tm_min); 3768 record->end_second = cpu_to_le16(tm.tm_sec); 3769 record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); 3770 record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | 3771 bp->ver_resp.chip_rev << 8 | 3772 bp->ver_resp.chip_metal); 3773 record->asic_id2 = 0; 3774 record->coredump_status = cpu_to_le32(status); 3775 record->ioctl_low_version = 0; 3776 record->ioctl_high_version = 0; 3777 } 3778 3779 static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) 3780 { 3781 u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); 3782 u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; 3783 struct coredump_segment_record *seg_record = NULL; 3784 struct bnxt_coredump_segment_hdr seg_hdr; 3785 struct bnxt_coredump coredump = {NULL}; 3786 time64_t start_time; 3787 u16 start_utc; 3788 int rc = 0, i; 3789 3790 if (buf) 3791 buf_len = *dump_len; 3792 3793 start_time = ktime_get_real_seconds(); 3794 start_utc = sys_tz.tz_minuteswest * 60; 3795 seg_hdr_len = sizeof(seg_hdr); 3796 3797 /* First segment should be hwrm_ver_get response */ 3798 *dump_len = seg_hdr_len + ver_get_resp_len; 3799 if (buf) { 3800 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, 3801 0, 0, 0); 3802 memcpy(buf + offset, &seg_hdr, seg_hdr_len); 3803 offset += seg_hdr_len; 3804 memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); 3805 offset += ver_get_resp_len; 3806 } 3807 3808 rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); 3809 if (rc) { 3810 netdev_err(bp->dev, "Failed to get coredump segment list\n"); 3811 goto err; 3812 } 3813 3814 *dump_len += seg_hdr_len * coredump.total_segs; 3815 3816 seg_record = (struct coredump_segment_record *)coredump.data; 3817 seg_record_len = sizeof(*seg_record); 3818 3819 for (i = 0; i < coredump.total_segs; i++) { 3820 u16 comp_id = le16_to_cpu(seg_record->component_id); 3821 u16 seg_id = le16_to_cpu(seg_record->segment_id); 3822 u32 duration = 0, seg_len = 0; 3823 unsigned long start, end; 3824 3825 if (buf && ((offset + seg_hdr_len) > 3826 BNXT_COREDUMP_BUF_LEN(buf_len))) { 3827 rc = -ENOBUFS; 3828 goto err; 3829 } 3830 3831 start = jiffies; 3832 3833 rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); 3834 if (rc) { 3835 netdev_err(bp->dev, 3836 "Failed to initiate coredump for seg = %d\n", 3837 seg_record->segment_id); 3838 goto next_seg; 3839 } 3840 3841 /* Write segment data into the buffer */ 3842 rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, 3843 &seg_len, buf, buf_len, 3844 offset + seg_hdr_len); 3845 if (rc && rc == -ENOBUFS) 3846 goto err; 3847 else if (rc) 3848 netdev_err(bp->dev, 3849 "Failed to retrieve coredump for seg = %d\n", 3850 seg_record->segment_id); 3851 3852 next_seg: 3853 end = jiffies; 3854 duration = jiffies_to_msecs(end - start); 3855 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len, 3856 rc, duration, 0); 3857 3858 if (buf) { 3859 /* Write segment header into the buffer */ 3860 memcpy(buf + offset, &seg_hdr, seg_hdr_len); 3861 offset += seg_hdr_len + seg_len; 3862 } 3863 3864 *dump_len += seg_len; 3865 seg_record = 3866 (struct coredump_segment_record *)((u8 *)seg_record + 3867 seg_record_len); 3868 } 3869 3870 err: 3871 if (buf) 3872 bnxt_fill_coredump_record(bp, buf + offset, start_time, 3873 start_utc, coredump.total_segs + 1, 3874 rc); 3875 kfree(coredump.data); 3876 *dump_len += sizeof(struct bnxt_coredump_record); 3877 if (rc == -ENOBUFS) 3878 netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); 3879 return rc; 3880 } 3881 3882 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) 3883 { 3884 struct bnxt *bp = netdev_priv(dev); 3885 3886 if (dump->flag > BNXT_DUMP_CRASH) { 3887 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n"); 3888 return -EINVAL; 3889 } 3890 3891 if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) { 3892 netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); 3893 return -EOPNOTSUPP; 3894 } 3895 3896 bp->dump_flag = dump->flag; 3897 return 0; 3898 } 3899 3900 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) 3901 { 3902 struct bnxt *bp = netdev_priv(dev); 3903 3904 if (bp->hwrm_spec_code < 0x10801) 3905 return -EOPNOTSUPP; 3906 3907 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | 3908 bp->ver_resp.hwrm_fw_min_8b << 16 | 3909 bp->ver_resp.hwrm_fw_bld_8b << 8 | 3910 bp->ver_resp.hwrm_fw_rsvd_8b; 3911 3912 dump->flag = bp->dump_flag; 3913 if (bp->dump_flag == BNXT_DUMP_CRASH) 3914 dump->len = BNXT_CRASH_DUMP_LEN; 3915 else 3916 bnxt_get_coredump(bp, NULL, &dump->len); 3917 return 0; 3918 } 3919 3920 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, 3921 void *buf) 3922 { 3923 struct bnxt *bp = netdev_priv(dev); 3924 3925 if (bp->hwrm_spec_code < 0x10801) 3926 return -EOPNOTSUPP; 3927 3928 memset(buf, 0, dump->len); 3929 3930 dump->flag = bp->dump_flag; 3931 if (dump->flag == BNXT_DUMP_CRASH) { 3932 #ifdef CONFIG_TEE_BNXT_FW 3933 return tee_bnxt_copy_coredump(buf, 0, dump->len); 3934 #endif 3935 } else { 3936 return bnxt_get_coredump(bp, buf, &dump->len); 3937 } 3938 3939 return 0; 3940 } 3941 3942 static int bnxt_get_ts_info(struct net_device *dev, 3943 struct ethtool_ts_info *info) 3944 { 3945 struct bnxt *bp = netdev_priv(dev); 3946 struct bnxt_ptp_cfg *ptp; 3947 3948 ptp = bp->ptp_cfg; 3949 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 3950 SOF_TIMESTAMPING_RX_SOFTWARE | 3951 SOF_TIMESTAMPING_SOFTWARE; 3952 3953 info->phc_index = -1; 3954 if (!ptp) 3955 return 0; 3956 3957 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 3958 SOF_TIMESTAMPING_RX_HARDWARE | 3959 SOF_TIMESTAMPING_RAW_HARDWARE; 3960 if (ptp->ptp_clock) 3961 info->phc_index = ptp_clock_index(ptp->ptp_clock); 3962 3963 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 3964 3965 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 3966 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 3967 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 3968 return 0; 3969 } 3970 3971 void bnxt_ethtool_init(struct bnxt *bp) 3972 { 3973 struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; 3974 struct hwrm_selftest_qlist_input req = {0}; 3975 struct bnxt_test_info *test_info; 3976 struct net_device *dev = bp->dev; 3977 int i, rc; 3978 3979 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) 3980 bnxt_get_pkgver(dev); 3981 3982 bp->num_tests = 0; 3983 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) 3984 return; 3985 3986 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1); 3987 mutex_lock(&bp->hwrm_cmd_lock); 3988 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3989 if (rc) 3990 goto ethtool_init_exit; 3991 3992 test_info = bp->test_info; 3993 if (!test_info) 3994 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); 3995 if (!test_info) 3996 goto ethtool_init_exit; 3997 3998 bp->test_info = test_info; 3999 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; 4000 if (bp->num_tests > BNXT_MAX_TEST) 4001 bp->num_tests = BNXT_MAX_TEST; 4002 4003 test_info->offline_mask = resp->offline_tests; 4004 test_info->timeout = le16_to_cpu(resp->test_timeout); 4005 if (!test_info->timeout) 4006 test_info->timeout = HWRM_CMD_TIMEOUT; 4007 for (i = 0; i < bp->num_tests; i++) { 4008 char *str = test_info->string[i]; 4009 char *fw_str = resp->test0_name + i * 32; 4010 4011 if (i == BNXT_MACLPBK_TEST_IDX) { 4012 strcpy(str, "Mac loopback test (offline)"); 4013 } else if (i == BNXT_PHYLPBK_TEST_IDX) { 4014 strcpy(str, "Phy loopback test (offline)"); 4015 } else if (i == BNXT_EXTLPBK_TEST_IDX) { 4016 strcpy(str, "Ext loopback test (offline)"); 4017 } else if (i == BNXT_IRQ_TEST_IDX) { 4018 strcpy(str, "Interrupt_test (offline)"); 4019 } else { 4020 strlcpy(str, fw_str, ETH_GSTRING_LEN); 4021 strncat(str, " test", ETH_GSTRING_LEN - strlen(str)); 4022 if (test_info->offline_mask & (1 << i)) 4023 strncat(str, " (offline)", 4024 ETH_GSTRING_LEN - strlen(str)); 4025 else 4026 strncat(str, " (online)", 4027 ETH_GSTRING_LEN - strlen(str)); 4028 } 4029 } 4030 4031 ethtool_init_exit: 4032 mutex_unlock(&bp->hwrm_cmd_lock); 4033 } 4034 4035 static void bnxt_get_eth_phy_stats(struct net_device *dev, 4036 struct ethtool_eth_phy_stats *phy_stats) 4037 { 4038 struct bnxt *bp = netdev_priv(dev); 4039 u64 *rx; 4040 4041 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 4042 return; 4043 4044 rx = bp->rx_port_stats_ext.sw_stats; 4045 phy_stats->SymbolErrorDuringCarrier = 4046 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); 4047 } 4048 4049 static void bnxt_get_eth_mac_stats(struct net_device *dev, 4050 struct ethtool_eth_mac_stats *mac_stats) 4051 { 4052 struct bnxt *bp = netdev_priv(dev); 4053 u64 *rx, *tx; 4054 4055 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 4056 return; 4057 4058 rx = bp->port_stats.sw_stats; 4059 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4060 4061 mac_stats->FramesReceivedOK = 4062 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); 4063 mac_stats->FramesTransmittedOK = 4064 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); 4065 mac_stats->FrameCheckSequenceErrors = 4066 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 4067 mac_stats->AlignmentErrors = 4068 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 4069 mac_stats->OutOfRangeLengthField = 4070 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); 4071 } 4072 4073 static void bnxt_get_eth_ctrl_stats(struct net_device *dev, 4074 struct ethtool_eth_ctrl_stats *ctrl_stats) 4075 { 4076 struct bnxt *bp = netdev_priv(dev); 4077 u64 *rx; 4078 4079 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 4080 return; 4081 4082 rx = bp->port_stats.sw_stats; 4083 ctrl_stats->MACControlFramesReceived = 4084 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); 4085 } 4086 4087 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { 4088 { 0, 64 }, 4089 { 65, 127 }, 4090 { 128, 255 }, 4091 { 256, 511 }, 4092 { 512, 1023 }, 4093 { 1024, 1518 }, 4094 { 1519, 2047 }, 4095 { 2048, 4095 }, 4096 { 4096, 9216 }, 4097 { 9217, 16383 }, 4098 {} 4099 }; 4100 4101 static void bnxt_get_rmon_stats(struct net_device *dev, 4102 struct ethtool_rmon_stats *rmon_stats, 4103 const struct ethtool_rmon_hist_range **ranges) 4104 { 4105 struct bnxt *bp = netdev_priv(dev); 4106 u64 *rx, *tx; 4107 4108 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 4109 return; 4110 4111 rx = bp->port_stats.sw_stats; 4112 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4113 4114 rmon_stats->jabbers = 4115 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 4116 rmon_stats->oversize_pkts = 4117 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); 4118 rmon_stats->undersize_pkts = 4119 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); 4120 4121 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); 4122 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); 4123 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); 4124 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); 4125 rmon_stats->hist[4] = 4126 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); 4127 rmon_stats->hist[5] = 4128 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); 4129 rmon_stats->hist[6] = 4130 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); 4131 rmon_stats->hist[7] = 4132 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); 4133 rmon_stats->hist[8] = 4134 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); 4135 rmon_stats->hist[9] = 4136 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); 4137 4138 rmon_stats->hist_tx[0] = 4139 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); 4140 rmon_stats->hist_tx[1] = 4141 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); 4142 rmon_stats->hist_tx[2] = 4143 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); 4144 rmon_stats->hist_tx[3] = 4145 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); 4146 rmon_stats->hist_tx[4] = 4147 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); 4148 rmon_stats->hist_tx[5] = 4149 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); 4150 rmon_stats->hist_tx[6] = 4151 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); 4152 rmon_stats->hist_tx[7] = 4153 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); 4154 rmon_stats->hist_tx[8] = 4155 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); 4156 rmon_stats->hist_tx[9] = 4157 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); 4158 4159 *ranges = bnxt_rmon_ranges; 4160 } 4161 4162 void bnxt_ethtool_free(struct bnxt *bp) 4163 { 4164 kfree(bp->test_info); 4165 bp->test_info = NULL; 4166 } 4167 4168 const struct ethtool_ops bnxt_ethtool_ops = { 4169 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 4170 ETHTOOL_COALESCE_MAX_FRAMES | 4171 ETHTOOL_COALESCE_USECS_IRQ | 4172 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 4173 ETHTOOL_COALESCE_STATS_BLOCK_USECS | 4174 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 4175 .get_link_ksettings = bnxt_get_link_ksettings, 4176 .set_link_ksettings = bnxt_set_link_ksettings, 4177 .get_fec_stats = bnxt_get_fec_stats, 4178 .get_fecparam = bnxt_get_fecparam, 4179 .set_fecparam = bnxt_set_fecparam, 4180 .get_pause_stats = bnxt_get_pause_stats, 4181 .get_pauseparam = bnxt_get_pauseparam, 4182 .set_pauseparam = bnxt_set_pauseparam, 4183 .get_drvinfo = bnxt_get_drvinfo, 4184 .get_regs_len = bnxt_get_regs_len, 4185 .get_regs = bnxt_get_regs, 4186 .get_wol = bnxt_get_wol, 4187 .set_wol = bnxt_set_wol, 4188 .get_coalesce = bnxt_get_coalesce, 4189 .set_coalesce = bnxt_set_coalesce, 4190 .get_msglevel = bnxt_get_msglevel, 4191 .set_msglevel = bnxt_set_msglevel, 4192 .get_sset_count = bnxt_get_sset_count, 4193 .get_strings = bnxt_get_strings, 4194 .get_ethtool_stats = bnxt_get_ethtool_stats, 4195 .set_ringparam = bnxt_set_ringparam, 4196 .get_ringparam = bnxt_get_ringparam, 4197 .get_channels = bnxt_get_channels, 4198 .set_channels = bnxt_set_channels, 4199 .get_rxnfc = bnxt_get_rxnfc, 4200 .set_rxnfc = bnxt_set_rxnfc, 4201 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, 4202 .get_rxfh_key_size = bnxt_get_rxfh_key_size, 4203 .get_rxfh = bnxt_get_rxfh, 4204 .set_rxfh = bnxt_set_rxfh, 4205 .flash_device = bnxt_flash_device, 4206 .get_eeprom_len = bnxt_get_eeprom_len, 4207 .get_eeprom = bnxt_get_eeprom, 4208 .set_eeprom = bnxt_set_eeprom, 4209 .get_link = bnxt_get_link, 4210 .get_eee = bnxt_get_eee, 4211 .set_eee = bnxt_set_eee, 4212 .get_module_info = bnxt_get_module_info, 4213 .get_module_eeprom = bnxt_get_module_eeprom, 4214 .nway_reset = bnxt_nway_reset, 4215 .set_phys_id = bnxt_set_phys_id, 4216 .self_test = bnxt_self_test, 4217 .get_ts_info = bnxt_get_ts_info, 4218 .reset = bnxt_reset, 4219 .set_dump = bnxt_set_dump, 4220 .get_dump_flag = bnxt_get_dump_flag, 4221 .get_dump_data = bnxt_get_dump_data, 4222 .get_eth_phy_stats = bnxt_get_eth_phy_stats, 4223 .get_eth_mac_stats = bnxt_get_eth_mac_stats, 4224 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, 4225 .get_rmon_stats = bnxt_get_rmon_stats, 4226 }; 4227