/* bnx2x_main.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)

#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
	[BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
	[BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM +
		      sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */

	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:

	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		/* print the asserts */
		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
			/* read a single assert entry */
			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			/* log entry if it contains a valid assert */
			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

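/* bnx2x_fw_dump_lvl() below walks the MFW trace buffer held in the device
 * scratchpad - after validating its signature and cyclic-buffer mark - and
 * prints its contents to the kernel log at the requested printk level.
 */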
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	if (pci_channel_offline(bp->pdev)) {
		BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
		return;
	}

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent HC from sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif
	if (IS_PF(bp) && disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	if (IS_PF(bp)) {
		struct host_sp_status_block *def_sb = bp->def_status_blk;
		int data_size, cstorm_offset;

		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
			  bp->def_idx, bp->def_att_idx, bp->attn_state,
			  bp->spq_prod_idx, bp->stats_counter);
		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
			  def_sb->atten_status_block.attn_bits,
			  def_sb->atten_status_block.attn_bits_ack,
			  def_sb->atten_status_block.status_block_id,
			  def_sb->atten_status_block.attn_bits_index);
		BNX2X_ERR(" def (");
		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
			pr_cont("0x%x%s",
				def_sb->sp_sb.index_values[i],
				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

		data_size = sizeof(struct hc_sp_status_block_data) /
			    sizeof(u32);
		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
		for (i = 0; i < data_size; i++)
			*((u32 *)&sp_sb_data + i) =
				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
					   i * sizeof(u32));

		pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
			sp_sb_data.igu_sb_id,
			sp_sb_data.igu_seg_id,
			sp_sb_data.p_func.pf_id,
			sp_sb_data.p_func.vnic_id,
			sp_sb_data.p_func.vf_id,
			sp_sb_data.p_func.vf_valid,
			sp_sb_data.state);
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* VF cannot access FW reflection for status block */
		if (IS_VF(bp))
			continue;

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (IS_PF(bp)) {
		/* event queue */
		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
		for (i = 0; i < NUM_EQ_DESC; i++) {
			u32 *data = (u32 *)&bp->eq_ring[i].message.data;

			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
				  i, bp->eq_ring[i].message.opcode,
				  bp->eq_ring[i].message.error);
			BNX2X_ERR("data: %x %x %x\n",
				  data[0], data[1], data[2]);
		}
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			if (!fp->txdata_ptr[cos])
				break;

			if (!txdata->tx_cons_sb)
				continue;

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	if (IS_PF(bp)) {
		int tmp_msg_en = bp->msg_enable;

		bnx2x_fw_dump(bp);
		bp->msg_enable |= NETIF_MSG_HW;
		BNX2X_ERR("Idle check (1st round) ----------\n");
		bnx2x_idle_chk(bp);
		BNX2X_ERR("Idle check (2nd round) ----------\n");
		bnx2x_idle_chk(bp);
		bp->msg_enable = tmp_msg_en;
		bnx2x_mc_assert(bp);
	}

	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

/* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return 0;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines
 */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    CFC_REG_NUM_LCIDS_INSIDE_PF,
					    "CFC PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    DORQ_REG_PF_USAGE_CNT,
					    "DQ PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
					    "QM PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
					    "Timers VNIC usage counter timed out",
					    poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
					    "Timers NUM_SCANS usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    dmae_reg_go_c[INIT_DMAE_C(bp)],
					    "DMAE command register timed out",
					    poll_cnt))
		return 1;

	return 0;
}

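/* Debug helper: log the per-PF enable/mask registers (CFC, PBF, IGU MSI/MSI-X,
 * PGLUE_B) at BNX2X_MSG_SP level so the state left behind after an FLR can be
 * inspected.
 */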
static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* Clean previous status - need to configure igu prior to ack */
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
"MSI" : "INTx"))); 1658 1659 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1660 1661 if (val & IGU_PF_CONF_INT_LINE_EN) 1662 pci_intx(bp->pdev, true); 1663 1664 barrier(); 1665 1666 /* init leading/trailing edge */ 1667 if (IS_MF(bp)) { 1668 val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1669 if (bp->port.pmf) 1670 /* enable nig and gpio3 attention */ 1671 val |= 0x1100; 1672 } else 1673 val = 0xffff; 1674 1675 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 1676 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 1677 } 1678 1679 void bnx2x_int_enable(struct bnx2x *bp) 1680 { 1681 if (bp->common.int_block == INT_BLOCK_HC) 1682 bnx2x_hc_int_enable(bp); 1683 else 1684 bnx2x_igu_int_enable(bp); 1685 } 1686 1687 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1688 { 1689 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1690 int i, offset; 1691 1692 if (disable_hw) 1693 /* prevent the HW from sending interrupts */ 1694 bnx2x_int_disable(bp); 1695 1696 /* make sure all ISRs are done */ 1697 if (msix) { 1698 synchronize_irq(bp->msix_table[0].vector); 1699 offset = 1; 1700 if (CNIC_SUPPORT(bp)) 1701 offset++; 1702 for_each_eth_queue(bp, i) 1703 synchronize_irq(bp->msix_table[offset++].vector); 1704 } else 1705 synchronize_irq(bp->pdev->irq); 1706 1707 /* make sure sp_task is not running */ 1708 cancel_delayed_work(&bp->sp_task); 1709 cancel_delayed_work(&bp->period_task); 1710 flush_workqueue(bnx2x_wq); 1711 } 1712 1713 /* fast path */ 1714 1715 /* 1716 * General service functions 1717 */ 1718 1719 /* Return true if succeeded to acquire the lock */ 1720 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) 1721 { 1722 u32 lock_status; 1723 u32 resource_bit = (1 << resource); 1724 int func = BP_FUNC(bp); 1725 u32 hw_lock_control_reg; 1726 1727 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1728 "Trying to take a lock on resource %d\n", resource); 1729 1730 /* Validating that the resource is within range */ 1731 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1732 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1733 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1734 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1735 return false; 1736 } 1737 1738 if (func <= 5) 1739 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 1740 else 1741 hw_lock_control_reg = 1742 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 1743 1744 /* Try to acquire the lock */ 1745 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 1746 lock_status = REG_RD(bp, hw_lock_control_reg); 1747 if (lock_status & resource_bit) 1748 return true; 1749 1750 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1751 "Failed to get a lock on resource %d\n", resource); 1752 return false; 1753 } 1754 1755 /** 1756 * bnx2x_get_leader_lock_resource - get the recovery leader resource id 1757 * 1758 * @bp: driver handle 1759 * 1760 * Returns the recovery leader resource id according to the engine this function 1761 * belongs to. Currently only only 2 engines is supported. 1762 */ 1763 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) 1764 { 1765 if (BP_PATH(bp)) 1766 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 1767 else 1768 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; 1769 } 1770 1771 /** 1772 * bnx2x_trylock_leader_lock- try to acquire a leader lock. 1773 * 1774 * @bp: driver handle 1775 * 1776 * Tries to acquire a leader lock for current engine. 
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Set the interrupt occurred bit for the sp-task to recognize it
	 * must ack the interrupt and transition according to the IGU
	 * state machine.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this bit
	 * is set, otherwise we will get out of sync and miss all
	 * further interrupts. Hence, the barrier.
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
1874 */ 1875 #ifdef BNX2X_STOP_ON_ERROR 1876 bnx2x_panic(); 1877 #else 1878 return; 1879 #endif 1880 1881 smp_mb__before_atomic(); 1882 atomic_inc(&bp->cq_spq_left); 1883 /* push the change in bp->spq_left and towards the memory */ 1884 smp_mb__after_atomic(); 1885 1886 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1887 1888 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 1889 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { 1890 /* if Q update ramrod is completed for last Q in AFEX vif set 1891 * flow, then ACK MCP at the end 1892 * 1893 * mark pending ACK to MCP bit. 1894 * prevent case that both bits are cleared. 1895 * At the end of load/unload driver checks that 1896 * sp_state is cleared, and this order prevents 1897 * races 1898 */ 1899 smp_mb__before_atomic(); 1900 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); 1901 wmb(); 1902 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 1903 smp_mb__after_atomic(); 1904 1905 /* schedule the sp task as mcp ack is required */ 1906 bnx2x_schedule_sp_task(bp); 1907 } 1908 1909 return; 1910 } 1911 1912 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) 1913 { 1914 struct bnx2x *bp = netdev_priv(dev_instance); 1915 u16 status = bnx2x_ack_int(bp); 1916 u16 mask; 1917 int i; 1918 u8 cos; 1919 1920 /* Return here if interrupt is shared and it's not for us */ 1921 if (unlikely(status == 0)) { 1922 DP(NETIF_MSG_INTR, "not our interrupt!\n"); 1923 return IRQ_NONE; 1924 } 1925 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); 1926 1927 #ifdef BNX2X_STOP_ON_ERROR 1928 if (unlikely(bp->panic)) 1929 return IRQ_HANDLED; 1930 #endif 1931 1932 for_each_eth_queue(bp, i) { 1933 struct bnx2x_fastpath *fp = &bp->fp[i]; 1934 1935 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); 1936 if (status & mask) { 1937 /* Handle Rx or Tx according to SB id */ 1938 for_each_cos_in_tx_queue(fp, cos) 1939 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); 1940 prefetch(&fp->sb_running_index[SM_RX_ID]); 1941 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); 1942 status &= ~mask; 1943 } 1944 } 1945 1946 if (CNIC_SUPPORT(bp)) { 1947 mask = 0x2; 1948 if (status & (mask | 0x1)) { 1949 struct cnic_ops *c_ops = NULL; 1950 1951 rcu_read_lock(); 1952 c_ops = rcu_dereference(bp->cnic_ops); 1953 if (c_ops && (bp->cnic_eth_dev.drv_state & 1954 CNIC_DRV_STATE_HANDLES_IRQ)) 1955 c_ops->cnic_handler(bp->cnic_data, NULL); 1956 rcu_read_unlock(); 1957 1958 status &= ~mask; 1959 } 1960 } 1961 1962 if (unlikely(status & 0x1)) { 1963 1964 /* schedule sp task to perform default status block work, ack 1965 * attentions and enable interrupts. 1966 */ 1967 bnx2x_schedule_sp_task(bp); 1968 1969 status &= ~0x1; 1970 if (!status) 1971 return IRQ_HANDLED; 1972 } 1973 1974 if (unlikely(status)) 1975 DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", 1976 status); 1977 1978 return IRQ_HANDLED; 1979 } 1980 1981 /* Link */ 1982 1983 /* 1984 * General service functions 1985 */ 1986 1987 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) 1988 { 1989 u32 lock_status; 1990 u32 resource_bit = (1 << resource); 1991 int func = BP_FUNC(bp); 1992 u32 hw_lock_control_reg; 1993 int cnt; 1994 1995 /* Validating that the resource is within range */ 1996 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1997 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1998 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1999 return -EINVAL; 2000 } 2001 2002 if (func <= 5) { 2003 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 2004 } else { 2005 hw_lock_control_reg = 2006 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 2007 } 2008 2009 /* Validating that the resource is not already taken */ 2010 lock_status = REG_RD(bp, hw_lock_control_reg); 2011 if (lock_status & resource_bit) { 2012 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n", 2013 lock_status, resource_bit); 2014 return -EEXIST; 2015 } 2016 2017 /* Try for 5 second every 5ms */ 2018 for (cnt = 0; cnt < 1000; cnt++) { 2019 /* Try to acquire the lock */ 2020 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 2021 lock_status = REG_RD(bp, hw_lock_control_reg); 2022 if (lock_status & resource_bit) 2023 return 0; 2024 2025 usleep_range(5000, 10000); 2026 } 2027 BNX2X_ERR("Timeout\n"); 2028 return -EAGAIN; 2029 } 2030 2031 int bnx2x_release_leader_lock(struct bnx2x *bp) 2032 { 2033 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 2034 } 2035 2036 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) 2037 { 2038 u32 lock_status; 2039 u32 resource_bit = (1 << resource); 2040 int func = BP_FUNC(bp); 2041 u32 hw_lock_control_reg; 2042 2043 /* Validating that the resource is within range */ 2044 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 2045 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 2046 resource, HW_LOCK_MAX_RESOURCE_VALUE); 2047 return -EINVAL; 2048 } 2049 2050 if (func <= 5) { 2051 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 2052 } else { 2053 hw_lock_control_reg = 2054 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 2055 } 2056 2057 /* Validating that the resource is currently taken */ 2058 lock_status = REG_RD(bp, hw_lock_control_reg); 2059 if (!(lock_status & resource_bit)) { 2060 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n", 2061 lock_status, resource_bit); 2062 return -EFAULT; 2063 } 2064 2065 REG_WR(bp, hw_lock_control_reg, resource_bit); 2066 return 0; 2067 } 2068 2069 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 2070 { 2071 /* The GPIO should be swapped if swap register is set and active */ 2072 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2073 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2074 int gpio_shift = gpio_num + 2075 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2076 u32 gpio_mask = (1 << gpio_shift); 2077 u32 gpio_reg; 2078 int value; 2079 2080 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2081 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2082 return -EINVAL; 2083 } 2084 2085 /* read GPIO value */ 2086 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2087 2088 /* get the requested pin value */ 2089 if ((gpio_reg & gpio_mask) == gpio_mask) 2090 value = 1; 2091 else 2092 value = 0; 2093 2094 return value; 2095 } 2096 2097 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2098 { 2099 /* The GPIO should be swapped if swap register is set and active */ 2100 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2101 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2102 int gpio_shift = gpio_num + 2103 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2104 u32 gpio_mask = (1 << gpio_shift); 2105 u32 gpio_reg; 2106 2107 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2108 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2109 return -EINVAL; 2110 } 2111 2112 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2113 /* read GPIO and mask except the float bits */ 2114 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2115 2116 switch (mode) { 2117 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2118 DP(NETIF_MSG_LINK, 2119 "Set GPIO %d (shift %d) -> output low\n", 2120 gpio_num, gpio_shift); 2121 /* clear FLOAT and set CLR */ 2122 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2123 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2124 break; 2125 2126 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2127 DP(NETIF_MSG_LINK, 2128 "Set GPIO %d (shift %d) -> output high\n", 2129 gpio_num, gpio_shift); 2130 /* clear FLOAT and set SET */ 2131 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2132 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2133 break; 2134 2135 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2136 DP(NETIF_MSG_LINK, 2137 "Set GPIO %d (shift %d) -> input\n", 2138 gpio_num, gpio_shift); 2139 /* set FLOAT */ 2140 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2141 break; 2142 2143 default: 2144 break; 2145 } 2146 2147 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2148 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2149 2150 return 0; 2151 } 2152 2153 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) 2154 { 2155 u32 gpio_reg = 0; 2156 int rc = 0; 2157 2158 /* Any port swapping should be handled by caller. 
*/ 2159 2160 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2161 /* read GPIO and mask except the float bits */ 2162 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2163 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2164 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2165 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2166 2167 switch (mode) { 2168 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2169 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); 2170 /* set CLR */ 2171 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2172 break; 2173 2174 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2175 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); 2176 /* set SET */ 2177 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2178 break; 2179 2180 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2181 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); 2182 /* set FLOAT */ 2183 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2184 break; 2185 2186 default: 2187 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); 2188 rc = -EINVAL; 2189 break; 2190 } 2191 2192 if (rc == 0) 2193 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2194 2195 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2196 2197 return rc; 2198 } 2199 2200 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2201 { 2202 /* The GPIO should be swapped if swap register is set and active */ 2203 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2204 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2205 int gpio_shift = gpio_num + 2206 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2207 u32 gpio_mask = (1 << gpio_shift); 2208 u32 gpio_reg; 2209 2210 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2211 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2212 return -EINVAL; 2213 } 2214 2215 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2216 /* read GPIO int */ 2217 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); 2218 2219 switch (mode) { 2220 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2221 DP(NETIF_MSG_LINK, 2222 "Clear GPIO INT %d (shift %d) -> output low\n", 2223 gpio_num, gpio_shift); 2224 /* clear SET and set CLR */ 2225 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2226 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2227 break; 2228 2229 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2230 DP(NETIF_MSG_LINK, 2231 "Set GPIO INT %d (shift %d) -> output high\n", 2232 gpio_num, gpio_shift); 2233 /* clear CLR and set SET */ 2234 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2235 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2236 break; 2237 2238 default: 2239 break; 2240 } 2241 2242 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); 2243 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2244 2245 return 0; 2246 } 2247 2248 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) 2249 { 2250 u32 spio_reg; 2251 2252 /* Only 2 SPIOs are configurable */ 2253 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 2254 BNX2X_ERR("Invalid SPIO 0x%x\n", spio); 2255 return -EINVAL; 2256 } 2257 2258 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2259 /* read SPIO and mask except the float bits */ 2260 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 2261 2262 switch (mode) { 2263 case MISC_SPIO_OUTPUT_LOW: 2264 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); 2265 /* clear FLOAT and set CLR */ 2266 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2267 spio_reg |= (spio << MISC_SPIO_CLR_POS); 2268 break; 2269 2270 case MISC_SPIO_OUTPUT_HIGH: 2271 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output 
high\n", spio); 2272 /* clear FLOAT and set SET */ 2273 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2274 spio_reg |= (spio << MISC_SPIO_SET_POS); 2275 break; 2276 2277 case MISC_SPIO_INPUT_HI_Z: 2278 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); 2279 /* set FLOAT */ 2280 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 2281 break; 2282 2283 default: 2284 break; 2285 } 2286 2287 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2288 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2289 2290 return 0; 2291 } 2292 2293 void bnx2x_calc_fc_adv(struct bnx2x *bp) 2294 { 2295 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2296 2297 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2298 ADVERTISED_Pause); 2299 switch (bp->link_vars.ieee_fc & 2300 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2301 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2302 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2303 ADVERTISED_Pause); 2304 break; 2305 2306 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2307 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2308 break; 2309 2310 default: 2311 break; 2312 } 2313 } 2314 2315 static void bnx2x_set_requested_fc(struct bnx2x *bp) 2316 { 2317 /* Initialize link parameters structure variables 2318 * It is recommended to turn off RX FC for jumbo frames 2319 * for better performance 2320 */ 2321 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2322 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2323 else 2324 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2325 } 2326 2327 static void bnx2x_init_dropless_fc(struct bnx2x *bp) 2328 { 2329 u32 pause_enabled = 0; 2330 2331 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { 2332 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 2333 pause_enabled = 1; 2334 2335 REG_WR(bp, BAR_USTRORM_INTMEM + 2336 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), 2337 pause_enabled); 2338 } 2339 2340 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", 2341 pause_enabled ? 
"enabled" : "disabled"); 2342 } 2343 2344 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2345 { 2346 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); 2347 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2348 2349 if (!BP_NOMCP(bp)) { 2350 bnx2x_set_requested_fc(bp); 2351 bnx2x_acquire_phy_lock(bp); 2352 2353 if (load_mode == LOAD_DIAG) { 2354 struct link_params *lp = &bp->link_params; 2355 lp->loopback_mode = LOOPBACK_XGXS; 2356 /* Prefer doing PHY loopback at highest speed */ 2357 if (lp->req_line_speed[cfx_idx] < SPEED_20000) { 2358 if (lp->speed_cap_mask[cfx_idx] & 2359 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) 2360 lp->req_line_speed[cfx_idx] = 2361 SPEED_20000; 2362 else if (lp->speed_cap_mask[cfx_idx] & 2363 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2364 lp->req_line_speed[cfx_idx] = 2365 SPEED_10000; 2366 else 2367 lp->req_line_speed[cfx_idx] = 2368 SPEED_1000; 2369 } 2370 } 2371 2372 if (load_mode == LOAD_LOOPBACK_EXT) { 2373 struct link_params *lp = &bp->link_params; 2374 lp->loopback_mode = LOOPBACK_EXT; 2375 } 2376 2377 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2378 2379 bnx2x_release_phy_lock(bp); 2380 2381 bnx2x_init_dropless_fc(bp); 2382 2383 bnx2x_calc_fc_adv(bp); 2384 2385 if (bp->link_vars.link_up) { 2386 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2387 bnx2x_link_report(bp); 2388 } 2389 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2390 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2391 return rc; 2392 } 2393 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2394 return -EINVAL; 2395 } 2396 2397 void bnx2x_link_set(struct bnx2x *bp) 2398 { 2399 if (!BP_NOMCP(bp)) { 2400 bnx2x_acquire_phy_lock(bp); 2401 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2402 bnx2x_release_phy_lock(bp); 2403 2404 bnx2x_init_dropless_fc(bp); 2405 2406 bnx2x_calc_fc_adv(bp); 2407 } else 2408 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2409 } 2410 2411 static void bnx2x__link_reset(struct bnx2x *bp) 2412 { 2413 if (!BP_NOMCP(bp)) { 2414 bnx2x_acquire_phy_lock(bp); 2415 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); 2416 bnx2x_release_phy_lock(bp); 2417 } else 2418 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 2419 } 2420 2421 void bnx2x_force_link_reset(struct bnx2x *bp) 2422 { 2423 bnx2x_acquire_phy_lock(bp); 2424 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2425 bnx2x_release_phy_lock(bp); 2426 } 2427 2428 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) 2429 { 2430 u8 rc = 0; 2431 2432 if (!BP_NOMCP(bp)) { 2433 bnx2x_acquire_phy_lock(bp); 2434 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, 2435 is_serdes); 2436 bnx2x_release_phy_lock(bp); 2437 } else 2438 BNX2X_ERR("Bootcode is missing - can not test link\n"); 2439 2440 return rc; 2441 } 2442 2443 /* Calculates the sum of vn_min_rates. 2444 It's needed for further normalizing of the min_rates. 2445 Returns: 2446 sum of vn_min_rates. 2447 or 2448 0 - if all the min_rates are 0. 2449 In the later case fairness algorithm should be deactivated. 2450 If not all min_rates are zero then those that are zeroes will be set to 1. 
2451 */ 2452 static void bnx2x_calc_vn_min(struct bnx2x *bp, 2453 struct cmng_init_input *input) 2454 { 2455 int all_zero = 1; 2456 int vn; 2457 2458 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2459 u32 vn_cfg = bp->mf_config[vn]; 2460 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2461 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2462 2463 /* Skip hidden vns */ 2464 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2465 vn_min_rate = 0; 2466 /* If min rate is zero - set it to 1 */ 2467 else if (!vn_min_rate) 2468 vn_min_rate = DEF_MIN_RATE; 2469 else 2470 all_zero = 0; 2471 2472 input->vnic_min_rate[vn] = vn_min_rate; 2473 } 2474 2475 /* if ETS or all min rates are zeros - disable fairness */ 2476 if (BNX2X_IS_ETS_ENABLED(bp)) { 2477 input->flags.cmng_enables &= 2478 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2479 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); 2480 } else if (all_zero) { 2481 input->flags.cmng_enables &= 2482 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2483 DP(NETIF_MSG_IFUP, 2484 "All MIN values are zeroes fairness will be disabled\n"); 2485 } else 2486 input->flags.cmng_enables |= 2487 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2488 } 2489 2490 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, 2491 struct cmng_init_input *input) 2492 { 2493 u16 vn_max_rate; 2494 u32 vn_cfg = bp->mf_config[vn]; 2495 2496 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2497 vn_max_rate = 0; 2498 else { 2499 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 2500 2501 if (IS_MF_PERCENT_BW(bp)) { 2502 /* maxCfg in percents of linkspeed */ 2503 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 2504 } else /* SD modes */ 2505 /* maxCfg is absolute in 100Mb units */ 2506 vn_max_rate = maxCfg * 100; 2507 } 2508 2509 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 2510 2511 input->vnic_max_rate[vn] = vn_max_rate; 2512 } 2513 2514 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 2515 { 2516 if (CHIP_REV_IS_SLOW(bp)) 2517 return CMNG_FNS_NONE; 2518 if (IS_MF(bp)) 2519 return CMNG_FNS_MINMAX; 2520 2521 return CMNG_FNS_NONE; 2522 } 2523 2524 void bnx2x_read_mf_cfg(struct bnx2x *bp) 2525 { 2526 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1); 2527 2528 if (BP_NOMCP(bp)) 2529 return; /* what should be the default value in this case */ 2530 2531 /* For 2 port configuration the absolute function number formula 2532 * is: 2533 * abs_func = 2 * vn + BP_PORT + BP_PATH 2534 * 2535 * and there are 4 functions per port 2536 * 2537 * For 4 port configuration it is 2538 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH 2539 * 2540 * and there are 2 functions per port 2541 */ 2542 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2543 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2544 2545 if (func >= E1H_FUNC_MAX) 2546 break; 2547 2548 bp->mf_config[vn] = 2549 MF_CFG_RD(bp, func_mf_config[func].config); 2550 } 2551 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 2552 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 2553 bp->flags |= MF_FUNC_DIS; 2554 } else { 2555 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); 2556 bp->flags &= ~MF_FUNC_DIS; 2557 } 2558 } 2559 2560 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) 2561 { 2562 struct cmng_init_input input; 2563 memset(&input, 0, sizeof(struct cmng_init_input)); 2564 2565 input.port_rate = bp->link_vars.line_speed; 2566 2567 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { 2568 int vn; 2569 2570 /* read mf conf from shmem */ 2571 if (read_cfg) 2572 bnx2x_read_mf_cfg(bp); 2573 2574 /* vn_weight_sum and enable fairness if not 0 */ 2575 bnx2x_calc_vn_min(bp, &input); 2576 2577 /* calculate and set min-max rate for each vn */ 2578 if (bp->port.pmf) 2579 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2580 bnx2x_calc_vn_max(bp, vn, &input); 2581 2582 /* always enable rate shaping and fairness */ 2583 input.flags.cmng_enables |= 2584 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 2585 2586 bnx2x_init_cmng(&input, &bp->cmng); 2587 return; 2588 } 2589 2590 /* rate shaping and fairness are disabled */ 2591 DP(NETIF_MSG_IFUP, 2592 "rate shaping and fairness are disabled\n"); 2593 } 2594 2595 static void storm_memset_cmng(struct bnx2x *bp, 2596 struct cmng_init *cmng, 2597 u8 port) 2598 { 2599 int vn; 2600 size_t size = sizeof(struct cmng_struct_per_port); 2601 2602 u32 addr = BAR_XSTRORM_INTMEM + 2603 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 2604 2605 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); 2606 2607 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2608 int func = func_by_vn(bp, vn); 2609 2610 addr = BAR_XSTRORM_INTMEM + 2611 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func); 2612 size = sizeof(struct rate_shaping_vars_per_vn); 2613 __storm_memset_struct(bp, addr, size, 2614 (u32 *)&cmng->vnic.vnic_max_rate[vn]); 2615 2616 addr = BAR_XSTRORM_INTMEM + 2617 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func); 2618 size = sizeof(struct fairness_vars_per_vn); 2619 __storm_memset_struct(bp, addr, size, 2620 (u32 *)&cmng->vnic.vnic_min_rate[vn]); 2621 } 2622 } 2623 2624 /* init cmng mode in HW according to local configuration */ 2625 void bnx2x_set_local_cmng(struct bnx2x *bp) 2626 { 2627 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2628 2629 if (cmng_fns != CMNG_FNS_NONE) { 2630 bnx2x_cmng_fns_init(bp, false, cmng_fns); 2631 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 2632 } else { 2633 /* rate shaping and fairness are disabled */ 2634 DP(NETIF_MSG_IFUP, 2635 "single function mode without fairness\n"); 2636 } 2637 } 2638 2639 /* This function is called upon link interrupt */ 2640 static void bnx2x_link_attn(struct bnx2x *bp) 2641 { 2642 /* Make sure that we are synced with the current statistics */ 2643 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2644 
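/* re-read the link state from the PHY/MAC and refresh link_vars before updating flow control, congestion management and the reported link status */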
2645 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2646 2647 bnx2x_init_dropless_fc(bp); 2648 2649 if (bp->link_vars.link_up) { 2650 2651 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { 2652 struct host_port_stats *pstats; 2653 2654 pstats = bnx2x_sp(bp, port_stats); 2655 /* reset old mac stats */ 2656 memset(&(pstats->mac_stx[0]), 0, 2657 sizeof(struct mac_stx)); 2658 } 2659 if (bp->state == BNX2X_STATE_OPEN) 2660 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2661 } 2662 2663 if (bp->link_vars.link_up && bp->link_vars.line_speed) 2664 bnx2x_set_local_cmng(bp); 2665 2666 __bnx2x_link_report(bp); 2667 2668 if (IS_MF(bp)) 2669 bnx2x_link_sync_notify(bp); 2670 } 2671 2672 void bnx2x__link_status_update(struct bnx2x *bp) 2673 { 2674 if (bp->state != BNX2X_STATE_OPEN) 2675 return; 2676 2677 /* read updated dcb configuration */ 2678 if (IS_PF(bp)) { 2679 bnx2x_dcbx_pmf_update(bp); 2680 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2681 if (bp->link_vars.link_up) 2682 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2683 else 2684 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2685 /* indicate link status */ 2686 bnx2x_link_report(bp); 2687 2688 } else { /* VF */ 2689 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | 2690 SUPPORTED_10baseT_Full | 2691 SUPPORTED_100baseT_Half | 2692 SUPPORTED_100baseT_Full | 2693 SUPPORTED_1000baseT_Full | 2694 SUPPORTED_2500baseX_Full | 2695 SUPPORTED_10000baseT_Full | 2696 SUPPORTED_TP | 2697 SUPPORTED_FIBRE | 2698 SUPPORTED_Autoneg | 2699 SUPPORTED_Pause | 2700 SUPPORTED_Asym_Pause); 2701 bp->port.advertising[0] = bp->port.supported[0]; 2702 2703 bp->link_params.bp = bp; 2704 bp->link_params.port = BP_PORT(bp); 2705 bp->link_params.req_duplex[0] = DUPLEX_FULL; 2706 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; 2707 bp->link_params.req_line_speed[0] = SPEED_10000; 2708 bp->link_params.speed_cap_mask[0] = 0x7f0000; 2709 bp->link_params.switch_cfg = SWITCH_CFG_10G; 2710 bp->link_vars.mac_type = MAC_TYPE_BMAC; 2711 bp->link_vars.line_speed = SPEED_10000; 2712 bp->link_vars.link_status = 2713 (LINK_STATUS_LINK_UP | 2714 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 2715 bp->link_vars.link_up = 1; 2716 bp->link_vars.duplex = DUPLEX_FULL; 2717 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; 2718 __bnx2x_link_report(bp); 2719 2720 bnx2x_sample_bulletin(bp); 2721 2722 /* if bulletin board did not have an update for link status 2723 * __bnx2x_link_report will report current status 2724 * but it will NOT duplicate report in case of already reported 2725 * during sampling bulletin board. 
2726 */ 2727 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2728 } 2729 } 2730 2731 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, 2732 u16 vlan_val, u8 allowed_prio) 2733 { 2734 struct bnx2x_func_state_params func_params = {NULL}; 2735 struct bnx2x_func_afex_update_params *f_update_params = 2736 &func_params.params.afex_update; 2737 2738 func_params.f_obj = &bp->func_obj; 2739 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE; 2740 2741 /* no need to wait for RAMROD completion, so don't 2742 * set RAMROD_COMP_WAIT flag 2743 */ 2744 2745 f_update_params->vif_id = vifid; 2746 f_update_params->afex_default_vlan = vlan_val; 2747 f_update_params->allowed_priorities = allowed_prio; 2748 2749 /* if ramrod can not be sent, response to MCP immediately */ 2750 if (bnx2x_func_state_change(bp, &func_params) < 0) 2751 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 2752 2753 return 0; 2754 } 2755 2756 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, 2757 u16 vif_index, u8 func_bit_map) 2758 { 2759 struct bnx2x_func_state_params func_params = {NULL}; 2760 struct bnx2x_func_afex_viflists_params *update_params = 2761 &func_params.params.afex_viflists; 2762 int rc; 2763 u32 drv_msg_code; 2764 2765 /* validate only LIST_SET and LIST_GET are received from switch */ 2766 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET)) 2767 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n", 2768 cmd_type); 2769 2770 func_params.f_obj = &bp->func_obj; 2771 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS; 2772 2773 /* set parameters according to cmd_type */ 2774 update_params->afex_vif_list_command = cmd_type; 2775 update_params->vif_list_index = vif_index; 2776 update_params->func_bit_map = 2777 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map; 2778 update_params->func_to_clear = 0; 2779 drv_msg_code = 2780 (cmd_type == VIF_LIST_RULE_GET) ? 
2781 DRV_MSG_CODE_AFEX_LISTGET_ACK : 2782 DRV_MSG_CODE_AFEX_LISTSET_ACK; 2783 2784 /* if ramrod can not be sent, respond to MCP immediately for 2785 * SET and GET requests (other are not triggered from MCP) 2786 */ 2787 rc = bnx2x_func_state_change(bp, &func_params); 2788 if (rc < 0) 2789 bnx2x_fw_command(bp, drv_msg_code, 0); 2790 2791 return 0; 2792 } 2793 2794 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) 2795 { 2796 struct afex_stats afex_stats; 2797 u32 func = BP_ABS_FUNC(bp); 2798 u32 mf_config; 2799 u16 vlan_val; 2800 u32 vlan_prio; 2801 u16 vif_id; 2802 u8 allowed_prio; 2803 u8 vlan_mode; 2804 u32 addr_to_write, vifid, addrs, stats_type, i; 2805 2806 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { 2807 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2808 DP(BNX2X_MSG_MCP, 2809 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); 2810 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); 2811 } 2812 2813 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { 2814 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2815 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); 2816 DP(BNX2X_MSG_MCP, 2817 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", 2818 vifid, addrs); 2819 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, 2820 addrs); 2821 } 2822 2823 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { 2824 addr_to_write = SHMEM2_RD(bp, 2825 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); 2826 stats_type = SHMEM2_RD(bp, 2827 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2828 2829 DP(BNX2X_MSG_MCP, 2830 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", 2831 addr_to_write); 2832 2833 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); 2834 2835 /* write response to scratchpad, for MCP */ 2836 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) 2837 REG_WR(bp, addr_to_write + i*sizeof(u32), 2838 *(((u32 *)(&afex_stats))+i)); 2839 2840 /* send ack message to MCP */ 2841 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); 2842 } 2843 2844 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { 2845 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); 2846 bp->mf_config[BP_VN(bp)] = mf_config; 2847 DP(BNX2X_MSG_MCP, 2848 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", 2849 mf_config); 2850 2851 /* if VIF_SET is "enabled" */ 2852 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { 2853 /* set rate limit directly to internal RAM */ 2854 struct cmng_init_input cmng_input; 2855 struct rate_shaping_vars_per_vn m_rs_vn; 2856 size_t size = sizeof(struct rate_shaping_vars_per_vn); 2857 u32 addr = BAR_XSTRORM_INTMEM + 2858 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); 2859 2860 bp->mf_config[BP_VN(bp)] = mf_config; 2861 2862 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); 2863 m_rs_vn.vn_counter.rate = 2864 cmng_input.vnic_max_rate[BP_VN(bp)]; 2865 m_rs_vn.vn_counter.quota = 2866 (m_rs_vn.vn_counter.rate * 2867 RS_PERIODIC_TIMEOUT_USEC) / 8; 2868 2869 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); 2870 2871 /* read relevant values from mf_cfg struct in shmem */ 2872 vif_id = 2873 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2874 FUNC_MF_CFG_E1HOV_TAG_MASK) >> 2875 FUNC_MF_CFG_E1HOV_TAG_SHIFT; 2876 vlan_val = 2877 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2878 FUNC_MF_CFG_AFEX_VLAN_MASK) >> 2879 FUNC_MF_CFG_AFEX_VLAN_SHIFT; 2880 vlan_prio = (mf_config & 2881 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 2882 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; 2883 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); 2884 
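/* extract the afex VLAN mode and the allowed CoS priorities from the per-function afex_config word in the shmem mf_cfg */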
vlan_mode = 2885 (MF_CFG_RD(bp, 2886 func_mf_config[func].afex_config) & 2887 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 2888 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; 2889 allowed_prio = 2890 (MF_CFG_RD(bp, 2891 func_mf_config[func].afex_config) & 2892 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 2893 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT; 2894 2895 /* send ramrod to FW, return in case of failure */ 2896 if (bnx2x_afex_func_update(bp, vif_id, vlan_val, 2897 allowed_prio)) 2898 return; 2899 2900 bp->afex_def_vlan_tag = vlan_val; 2901 bp->afex_vlan_mode = vlan_mode; 2902 } else { 2903 /* notify link down because BP->flags is disabled */ 2904 bnx2x_link_report(bp); 2905 2906 /* send INVALID VIF ramrod to FW */ 2907 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); 2908 2909 /* Reset the default afex VLAN */ 2910 bp->afex_def_vlan_tag = -1; 2911 } 2912 } 2913 } 2914 2915 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) 2916 { 2917 struct bnx2x_func_switch_update_params *switch_update_params; 2918 struct bnx2x_func_state_params func_params; 2919 2920 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params)); 2921 switch_update_params = &func_params.params.switch_update; 2922 func_params.f_obj = &bp->func_obj; 2923 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 2924 2925 /* Prepare parameters for function state transitions */ 2926 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 2927 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 2928 2929 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { 2930 int func = BP_ABS_FUNC(bp); 2931 u32 val; 2932 2933 /* Re-learn the S-tag from shmem */ 2934 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2935 FUNC_MF_CFG_E1HOV_TAG_MASK; 2936 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 2937 bp->mf_ov = val; 2938 } else { 2939 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n"); 2940 goto fail; 2941 } 2942 2943 /* Configure new S-tag in LLH */ 2944 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8, 2945 bp->mf_ov); 2946 2947 /* Send Ramrod to update FW of change */ 2948 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, 2949 &switch_update_params->changes); 2950 switch_update_params->vlan = bp->mf_ov; 2951 2952 if (bnx2x_func_state_change(bp, &func_params) < 0) { 2953 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", 2954 bp->mf_ov); 2955 goto fail; 2956 } else { 2957 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", 2958 bp->mf_ov); 2959 } 2960 } else { 2961 goto fail; 2962 } 2963 2964 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); 2965 return; 2966 fail: 2967 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); 2968 } 2969 2970 static void bnx2x_pmf_update(struct bnx2x *bp) 2971 { 2972 int port = BP_PORT(bp); 2973 u32 val; 2974 2975 bp->port.pmf = 1; 2976 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); 2977 2978 /* 2979 * We need the mb() to ensure the ordering between the writing to 2980 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). 
2981 */ 2982 smp_mb(); 2983 2984 /* queue a periodic task */ 2985 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2986 2987 bnx2x_dcbx_pmf_update(bp); 2988 2989 /* enable nig attention */ 2990 val = (0xff0f | (1 << (BP_VN(bp) + 4))); 2991 if (bp->common.int_block == INT_BLOCK_HC) { 2992 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2993 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2994 } else if (!CHIP_IS_E1x(bp)) { 2995 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 2996 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 2997 } 2998 2999 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 3000 } 3001 3002 /* end of Link */ 3003 3004 /* slow path */ 3005 3006 /* 3007 * General service functions 3008 */ 3009 3010 /* send the MCP a request, block until there is a reply */ 3011 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 3012 { 3013 int mb_idx = BP_FW_MB_IDX(bp); 3014 u32 seq; 3015 u32 rc = 0; 3016 u32 cnt = 1; 3017 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 3018 3019 mutex_lock(&bp->fw_mb_mutex); 3020 seq = ++bp->fw_seq; 3021 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); 3022 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); 3023 3024 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", 3025 (command | seq), param); 3026 3027 do { 3028 /* let the FW do its magic ... */ 3029 msleep(delay); 3030 3031 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); 3032 3033 /* Give the FW up to 5 seconds (500*10ms) */ 3034 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 3035 3036 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", 3037 cnt*delay, rc, seq); 3038 3039 /* is this a reply to our command? */ 3040 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) 3041 rc &= FW_MSG_CODE_MASK; 3042 else { 3043 /* FW BUG! */ 3044 BNX2X_ERR("FW failed to respond!\n"); 3045 bnx2x_fw_dump(bp); 3046 rc = 0; 3047 } 3048 mutex_unlock(&bp->fw_mb_mutex); 3049 3050 return rc; 3051 } 3052 3053 static void storm_memset_func_cfg(struct bnx2x *bp, 3054 struct tstorm_eth_function_common_config *tcfg, 3055 u16 abs_fid) 3056 { 3057 size_t size = sizeof(struct tstorm_eth_function_common_config); 3058 3059 u32 addr = BAR_TSTRORM_INTMEM + 3060 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); 3061 3062 __storm_memset_struct(bp, addr, size, (u32 *)tcfg); 3063 } 3064 3065 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) 3066 { 3067 if (CHIP_IS_E1x(bp)) { 3068 struct tstorm_eth_function_common_config tcfg = {0}; 3069 3070 storm_memset_func_cfg(bp, &tcfg, p->func_id); 3071 } 3072 3073 /* Enable the function in the FW */ 3074 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); 3075 storm_memset_func_en(bp, p->func_id, 1); 3076 3077 /* spq */ 3078 if (p->spq_active) { 3079 storm_memset_spq_addr(bp, p->spq_map, p->func_id); 3080 REG_WR(bp, XSEM_REG_FAST_MEMORY + 3081 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); 3082 } 3083 } 3084 3085 /** 3086 * bnx2x_get_common_flags - Return common flags 3087 * 3088 * @bp: device handle 3089 * @fp: queue handle 3090 * @zero_stats: TRUE if statistics zeroing is needed 3091 * 3092 * Return the flags that are common to both Tx-only and regular connections.
3093 */ 3094 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, 3095 struct bnx2x_fastpath *fp, 3096 bool zero_stats) 3097 { 3098 unsigned long flags = 0; 3099 3100 /* PF driver will always initialize the Queue to an ACTIVE state */ 3101 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); 3102 3103 /* tx only connections collect statistics (on the same index as the 3104 * parent connection). The statistics are zeroed when the parent 3105 * connection is initialized. 3106 */ 3107 3108 __set_bit(BNX2X_Q_FLG_STATS, &flags); 3109 if (zero_stats) 3110 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 3111 3112 if (bp->flags & TX_SWITCHING) 3113 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags); 3114 3115 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); 3116 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); 3117 3118 #ifdef BNX2X_STOP_ON_ERROR 3119 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); 3120 #endif 3121 3122 return flags; 3123 } 3124 3125 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 3126 struct bnx2x_fastpath *fp, 3127 bool leading) 3128 { 3129 unsigned long flags = 0; 3130 3131 /* calculate other queue flags */ 3132 if (IS_MF_SD(bp)) 3133 __set_bit(BNX2X_Q_FLG_OV, &flags); 3134 3135 if (IS_FCOE_FP(fp)) { 3136 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 3137 /* For FCoE - force usage of default priority (for afex) */ 3138 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); 3139 } 3140 3141 if (fp->mode != TPA_MODE_DISABLED) { 3142 __set_bit(BNX2X_Q_FLG_TPA, &flags); 3143 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 3144 if (fp->mode == TPA_MODE_GRO) 3145 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); 3146 } 3147 3148 if (leading) { 3149 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); 3150 __set_bit(BNX2X_Q_FLG_MCAST, &flags); 3151 } 3152 3153 /* Always set HW VLAN stripping */ 3154 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 3155 3156 /* configure silent vlan removal */ 3157 if (IS_MF_AFEX(bp)) 3158 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); 3159 3160 return flags | bnx2x_get_common_flags(bp, fp, true); 3161 } 3162 3163 static void bnx2x_pf_q_prep_general(struct bnx2x *bp, 3164 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, 3165 u8 cos) 3166 { 3167 gen_init->stat_id = bnx2x_stats_id(fp); 3168 gen_init->spcl_id = fp->cl_id; 3169 3170 /* Always use mini-jumbo MTU for FCoE L2 ring */ 3171 if (IS_FCOE_FP(fp)) 3172 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 3173 else 3174 gen_init->mtu = bp->dev->mtu; 3175 3176 gen_init->cos = cos; 3177 3178 gen_init->fp_hsi = ETH_FP_HSI_VERSION; 3179 } 3180 3181 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, 3182 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, 3183 struct bnx2x_rxq_setup_params *rxq_init) 3184 { 3185 u8 max_sge = 0; 3186 u16 sge_sz = 0; 3187 u16 tpa_agg_size = 0; 3188 3189 if (fp->mode != TPA_MODE_DISABLED) { 3190 pause->sge_th_lo = SGE_TH_LO(bp); 3191 pause->sge_th_hi = SGE_TH_HI(bp); 3192 3193 /* validate SGE ring has enough to cross high threshold */ 3194 WARN_ON(bp->dropless_fc && 3195 pause->sge_th_hi + FW_PREFETCH_CNT > 3196 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 3197 3198 tpa_agg_size = TPA_AGG_SIZE; 3199 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> 3200 SGE_PAGE_SHIFT; 3201 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 3202 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; 3203 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff); 3204 } 3205 3206 /* pause - not for e1 */ 3207 if (!CHIP_IS_E1(bp)) { 3208 pause->bd_th_lo = BD_TH_LO(bp); 3209 pause->bd_th_hi = BD_TH_HI(bp); 3210 3211 pause->rcq_th_lo = RCQ_TH_LO(bp); 3212 pause->rcq_th_hi = RCQ_TH_HI(bp); 3213 
/* 3214 * validate that rings have enough entries to cross 3215 * high thresholds 3216 */ 3217 WARN_ON(bp->dropless_fc && 3218 pause->bd_th_hi + FW_PREFETCH_CNT > 3219 bp->rx_ring_size); 3220 WARN_ON(bp->dropless_fc && 3221 pause->rcq_th_hi + FW_PREFETCH_CNT > 3222 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 3223 3224 pause->pri_map = 1; 3225 } 3226 3227 /* rxq setup */ 3228 rxq_init->dscr_map = fp->rx_desc_mapping; 3229 rxq_init->sge_map = fp->rx_sge_mapping; 3230 rxq_init->rcq_map = fp->rx_comp_mapping; 3231 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 3232 3233 /* This should be the maximum number of data bytes that may be 3234 * placed on the BD (not including padding). 3235 */ 3236 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - 3237 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; 3238 3239 rxq_init->cl_qzone_id = fp->cl_qzone_id; 3240 rxq_init->tpa_agg_sz = tpa_agg_size; 3241 rxq_init->sge_buf_sz = sge_sz; 3242 rxq_init->max_sges_pkt = max_sge; 3243 rxq_init->rss_engine_id = BP_FUNC(bp); 3244 rxq_init->mcast_engine_id = BP_FUNC(bp); 3245 3246 /* Maximum number of simultaneous TPA aggregations for this Queue. 3247 * 3248 * For PF Clients it should be the maximum available number. 3249 * VF driver(s) may want to define it to a smaller value. 3250 */ 3251 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); 3252 3253 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 3254 rxq_init->fw_sb_id = fp->fw_sb_id; 3255 3256 if (IS_FCOE_FP(fp)) 3257 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; 3258 else 3259 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 3260 /* configure silent vlan removal 3261 * if multi function mode is afex, then mask default vlan 3262 */ 3263 if (IS_MF_AFEX(bp)) { 3264 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; 3265 rxq_init->silent_removal_mask = VLAN_VID_MASK; 3266 } 3267 } 3268 3269 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, 3270 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, 3271 u8 cos) 3272 { 3273 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; 3274 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 3275 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 3276 txq_init->fw_sb_id = fp->fw_sb_id; 3277 3278 /* 3279 * set the tss leading client id for TX classification == 3280 * leading RSS client id 3281 */ 3282 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); 3283 3284 if (IS_FCOE_FP(fp)) { 3285 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; 3286 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; 3287 } 3288 } 3289 3290 static void bnx2x_pf_init(struct bnx2x *bp) 3291 { 3292 struct bnx2x_func_init_params func_init = {0}; 3293 struct event_ring_data eq_data = { {0} }; 3294 3295 if (!CHIP_IS_E1x(bp)) { 3296 /* reset IGU PF statistics: MSIX + ATTN */ 3297 /* PF */ 3298 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 3299 BNX2X_IGU_STAS_MSG_VF_CNT*4 + 3300 (CHIP_MODE_IS_4_PORT(bp) ? 3301 BP_FUNC(bp) : BP_VN(bp))*4, 0); 3302 /* ATTN */ 3303 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 3304 BNX2X_IGU_STAS_MSG_VF_CNT*4 + 3305 BNX2X_IGU_STAS_MSG_PF_CNT*4 + 3306 (CHIP_MODE_IS_4_PORT(bp) ?
3307 BP_FUNC(bp) : BP_VN(bp))*4, 0); 3308 } 3309 3310 func_init.spq_active = true; 3311 func_init.pf_id = BP_FUNC(bp); 3312 func_init.func_id = BP_FUNC(bp); 3313 func_init.spq_map = bp->spq_mapping; 3314 func_init.spq_prod = bp->spq_prod_idx; 3315 3316 bnx2x_func_init(bp, &func_init); 3317 3318 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); 3319 3320 /* 3321 * Congestion management values depend on the link rate 3322 * There is no active link so initial link rate is set to 10 Gbps. 3323 * When the link comes up The congestion management values are 3324 * re-calculated according to the actual link rate. 3325 */ 3326 bp->link_vars.line_speed = SPEED_10000; 3327 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); 3328 3329 /* Only the PMF sets the HW */ 3330 if (bp->port.pmf) 3331 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3332 3333 /* init Event Queue - PCI bus guarantees correct endianity*/ 3334 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); 3335 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); 3336 eq_data.producer = bp->eq_prod; 3337 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 3338 eq_data.sb_id = DEF_SB_ID; 3339 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); 3340 } 3341 3342 static void bnx2x_e1h_disable(struct bnx2x *bp) 3343 { 3344 int port = BP_PORT(bp); 3345 3346 bnx2x_tx_disable(bp); 3347 3348 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 3349 } 3350 3351 static void bnx2x_e1h_enable(struct bnx2x *bp) 3352 { 3353 int port = BP_PORT(bp); 3354 3355 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 3356 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 3357 3358 /* Tx queue should be only re-enabled */ 3359 netif_tx_wake_all_queues(bp->dev); 3360 3361 /* 3362 * Should not call netif_carrier_on since it will be called if the link 3363 * is up when checking for link state 3364 */ 3365 } 3366 3367 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3368 3369 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) 3370 { 3371 struct eth_stats_info *ether_stat = 3372 &bp->slowpath->drv_info_to_mcp.ether_stat; 3373 struct bnx2x_vlan_mac_obj *mac_obj = 3374 &bp->sp_objs->mac_obj; 3375 int i; 3376 3377 strlcpy(ether_stat->version, DRV_MODULE_VERSION, 3378 ETH_STAT_INFO_VERSION_LEN); 3379 3380 /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the 3381 * mac_local field in ether_stat struct. The base address is offset by 2 3382 * bytes to account for the field being 8 bytes but a mac address is 3383 * only 6 bytes. Likewise, the stride for the get_n_elements function is 3384 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes 3385 * allocated by the ether_stat struct, so the macs will land in their 3386 * proper positions. 3387 */ 3388 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) 3389 memset(ether_stat->mac_local + i, 0, 3390 sizeof(ether_stat->mac_local[0])); 3391 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, 3392 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3393 ether_stat->mac_local + MAC_PAD, MAC_PAD, 3394 ETH_ALEN); 3395 ether_stat->mtu_size = bp->dev->mtu; 3396 if (bp->dev->features & NETIF_F_RXCSUM) 3397 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3398 if (bp->dev->features & NETIF_F_TSO) 3399 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 3400 ether_stat->feature_flags |= bp->common.boot_mode; 3401 3402 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 
1 : 0; 3403 3404 ether_stat->txq_size = bp->tx_ring_size; 3405 ether_stat->rxq_size = bp->rx_ring_size; 3406 3407 #ifdef CONFIG_BNX2X_SRIOV 3408 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; 3409 #endif 3410 } 3411 3412 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3413 { 3414 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3415 struct fcoe_stats_info *fcoe_stat = 3416 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3417 3418 if (!CNIC_LOADED(bp)) 3419 return; 3420 3421 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); 3422 3423 fcoe_stat->qos_priority = 3424 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3425 3426 /* insert FCoE stats from ramrod response */ 3427 if (!NO_FCOE(bp)) { 3428 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3429 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3430 tstorm_queue_statistics; 3431 3432 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3433 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3434 xstorm_queue_statistics; 3435 3436 struct fcoe_statistics_params *fw_fcoe_stat = 3437 &bp->fw_stats_data->fcoe; 3438 3439 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, 3440 fcoe_stat->rx_bytes_lo, 3441 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 3442 3443 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3444 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 3445 fcoe_stat->rx_bytes_lo, 3446 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 3447 3448 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3449 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 3450 fcoe_stat->rx_bytes_lo, 3451 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 3452 3453 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3454 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 3455 fcoe_stat->rx_bytes_lo, 3456 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 3457 3458 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3459 fcoe_stat->rx_frames_lo, 3460 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 3461 3462 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3463 fcoe_stat->rx_frames_lo, 3464 fcoe_q_tstorm_stats->rcv_ucast_pkts); 3465 3466 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3467 fcoe_stat->rx_frames_lo, 3468 fcoe_q_tstorm_stats->rcv_bcast_pkts); 3469 3470 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3471 fcoe_stat->rx_frames_lo, 3472 fcoe_q_tstorm_stats->rcv_mcast_pkts); 3473 3474 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, 3475 fcoe_stat->tx_bytes_lo, 3476 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); 3477 3478 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3479 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, 3480 fcoe_stat->tx_bytes_lo, 3481 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); 3482 3483 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3484 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, 3485 fcoe_stat->tx_bytes_lo, 3486 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); 3487 3488 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3489 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, 3490 fcoe_stat->tx_bytes_lo, 3491 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); 3492 3493 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3494 fcoe_stat->tx_frames_lo, 3495 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); 3496 3497 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3498 fcoe_stat->tx_frames_lo, 3499 fcoe_q_xstorm_stats->ucast_pkts_sent); 3500 3501 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3502 fcoe_stat->tx_frames_lo, 3503 fcoe_q_xstorm_stats->bcast_pkts_sent); 3504 3505 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3506 fcoe_stat->tx_frames_lo, 3507 fcoe_q_xstorm_stats->mcast_pkts_sent); 3508 } 3509 3510 /* ask L5 driver to add data to the struct */ 3511 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3512 } 3513 3514 static void 
bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3515 { 3516 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3517 struct iscsi_stats_info *iscsi_stat = 3518 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3519 3520 if (!CNIC_LOADED(bp)) 3521 return; 3522 3523 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, 3524 ETH_ALEN); 3525 3526 iscsi_stat->qos_priority = 3527 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3528 3529 /* ask L5 driver to add data to the struct */ 3530 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3531 } 3532 3533 /* called due to MCP event (on pmf): 3534 * reread new bandwidth configuration 3535 * configure FW 3536 * notify others function about the change 3537 */ 3538 static void bnx2x_config_mf_bw(struct bnx2x *bp) 3539 { 3540 /* Workaround for MFW bug. 3541 * MFW is not supposed to generate BW attention in 3542 * single function mode. 3543 */ 3544 if (!IS_MF(bp)) { 3545 DP(BNX2X_MSG_MCP, 3546 "Ignoring MF BW config in single function mode\n"); 3547 return; 3548 } 3549 3550 if (bp->link_vars.link_up) { 3551 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); 3552 bnx2x_link_sync_notify(bp); 3553 } 3554 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3555 } 3556 3557 static void bnx2x_set_mf_bw(struct bnx2x *bp) 3558 { 3559 bnx2x_config_mf_bw(bp); 3560 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3561 } 3562 3563 static void bnx2x_handle_eee_event(struct bnx2x *bp) 3564 { 3565 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); 3566 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3567 } 3568 3569 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) 3570 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) 3571 3572 static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3573 { 3574 enum drv_info_opcode op_code; 3575 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3576 bool release = false; 3577 int wait; 3578 3579 /* if drv_info version supported by MFW doesn't match - send NACK */ 3580 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3581 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3582 return; 3583 } 3584 3585 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3586 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3587 3588 /* Must prevent other flows from accessing drv_info_to_mcp */ 3589 mutex_lock(&bp->drv_info_mutex); 3590 3591 memset(&bp->slowpath->drv_info_to_mcp, 0, 3592 sizeof(union drv_info_to_mcp)); 3593 3594 switch (op_code) { 3595 case ETH_STATS_OPCODE: 3596 bnx2x_drv_info_ether_stat(bp); 3597 break; 3598 case FCOE_STATS_OPCODE: 3599 bnx2x_drv_info_fcoe_stat(bp); 3600 break; 3601 case ISCSI_STATS_OPCODE: 3602 bnx2x_drv_info_iscsi_stat(bp); 3603 break; 3604 default: 3605 /* if op code isn't supported - send NACK */ 3606 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3607 goto out; 3608 } 3609 3610 /* if we got drv_info attn from MFW then these fields are defined in 3611 * shmem2 for sure 3612 */ 3613 SHMEM2_WR(bp, drv_info_host_addr_lo, 3614 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3615 SHMEM2_WR(bp, drv_info_host_addr_hi, 3616 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3617 3618 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3619 3620 /* Since possible management wants both this and get_driver_version 3621 * need to wait until management notifies us it finished utilizing 3622 * the buffer. 
3623 */ 3624 if (!SHMEM2_HAS(bp, mfw_drv_indication)) { 3625 DP(BNX2X_MSG_MCP, "Management does not support indication\n"); 3626 } else if (!bp->drv_info_mng_owner) { 3627 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); 3628 3629 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { 3630 u32 indication = SHMEM2_RD(bp, mfw_drv_indication); 3631 3632 /* Management is done; need to clear indication */ 3633 if (indication & bit) { 3634 SHMEM2_WR(bp, mfw_drv_indication, 3635 indication & ~bit); 3636 release = true; 3637 break; 3638 } 3639 3640 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); 3641 } 3642 } 3643 if (!release) { 3644 DP(BNX2X_MSG_MCP, "Management did not release indication\n"); 3645 bp->drv_info_mng_owner = true; 3646 } 3647 3648 out: 3649 mutex_unlock(&bp->drv_info_mutex); 3650 } 3651 3652 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) 3653 { 3654 u8 vals[4]; 3655 int i = 0; 3656 3657 if (bnx2x_format) { 3658 i = sscanf(version, "1.%c%hhd.%hhd.%hhd", 3659 &vals[0], &vals[1], &vals[2], &vals[3]); 3660 if (i > 0) 3661 vals[0] -= '0'; 3662 } else { 3663 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", 3664 &vals[0], &vals[1], &vals[2], &vals[3]); 3665 } 3666 3667 while (i < 4) 3668 vals[i++] = 0; 3669 3670 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; 3671 } 3672 3673 void bnx2x_update_mng_version(struct bnx2x *bp) 3674 { 3675 u32 iscsiver = DRV_VER_NOT_LOADED; 3676 u32 fcoever = DRV_VER_NOT_LOADED; 3677 u32 ethver = DRV_VER_NOT_LOADED; 3678 int idx = BP_FW_MB_IDX(bp); 3679 u8 *version; 3680 3681 if (!SHMEM2_HAS(bp, func_os_drv_ver)) 3682 return; 3683 3684 mutex_lock(&bp->drv_info_mutex); 3685 /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ 3686 if (bp->drv_info_mng_owner) 3687 goto out; 3688 3689 if (bp->state != BNX2X_STATE_OPEN) 3690 goto out; 3691 3692 /* Parse ethernet driver version */ 3693 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); 3694 if (!CNIC_LOADED(bp)) 3695 goto out; 3696 3697 /* Try getting storage driver version via cnic */ 3698 memset(&bp->slowpath->drv_info_to_mcp, 0, 3699 sizeof(union drv_info_to_mcp)); 3700 bnx2x_drv_info_iscsi_stat(bp); 3701 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; 3702 iscsiver = bnx2x_update_mng_version_utility(version, false); 3703 3704 memset(&bp->slowpath->drv_info_to_mcp, 0, 3705 sizeof(union drv_info_to_mcp)); 3706 bnx2x_drv_info_fcoe_stat(bp); 3707 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; 3708 fcoever = bnx2x_update_mng_version_utility(version, false); 3709 3710 out: 3711 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); 3712 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); 3713 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); 3714 3715 mutex_unlock(&bp->drv_info_mutex); 3716 3717 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", 3718 ethver, iscsiver, fcoever); 3719 } 3720 3721 void bnx2x_update_mfw_dump(struct bnx2x *bp) 3722 { 3723 u32 drv_ver; 3724 u32 valid_dump; 3725 3726 if (!SHMEM2_HAS(bp, drv_info)) 3727 return; 3728 3729 /* Update Driver load time, possibly broken in y2038 */ 3730 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds()); 3731 3732 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); 3733 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); 3734 3735 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM)); 3736 3737 /* Check & notify On-Chip dump. 
*/ 3738 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump); 3739 3740 if (valid_dump & FIRST_DUMP_VALID) 3741 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); 3742 3743 if (valid_dump & SECOND_DUMP_VALID) 3744 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); 3745 } 3746 3747 static void bnx2x_oem_event(struct bnx2x *bp, u32 event) 3748 { 3749 u32 cmd_ok, cmd_fail; 3750 3751 /* sanity */ 3752 if (event & DRV_STATUS_DCC_EVENT_MASK && 3753 event & DRV_STATUS_OEM_EVENT_MASK) { 3754 BNX2X_ERR("Received simultaneous events %08x\n", event); 3755 return; 3756 } 3757 3758 if (event & DRV_STATUS_DCC_EVENT_MASK) { 3759 cmd_fail = DRV_MSG_CODE_DCC_FAILURE; 3760 cmd_ok = DRV_MSG_CODE_DCC_OK; 3761 } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ { 3762 cmd_fail = DRV_MSG_CODE_OEM_FAILURE; 3763 cmd_ok = DRV_MSG_CODE_OEM_OK; 3764 } 3765 3766 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); 3767 3768 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF | 3769 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) { 3770 /* This is the only place besides the function initialization 3771 * where the bp->flags can change so it is done without any 3772 * locks 3773 */ 3774 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3775 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3776 bp->flags |= MF_FUNC_DIS; 3777 3778 bnx2x_e1h_disable(bp); 3779 } else { 3780 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3781 bp->flags &= ~MF_FUNC_DIS; 3782 3783 bnx2x_e1h_enable(bp); 3784 } 3785 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF | 3786 DRV_STATUS_OEM_DISABLE_ENABLE_PF); 3787 } 3788 3789 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | 3790 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) { 3791 bnx2x_config_mf_bw(bp); 3792 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | 3793 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION); 3794 } 3795 3796 /* Report results to MCP */ 3797 if (event) 3798 bnx2x_fw_command(bp, cmd_fail, 0); 3799 else 3800 bnx2x_fw_command(bp, cmd_ok, 0); 3801 } 3802 3803 /* must be called under the spq lock */ 3804 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3805 { 3806 struct eth_spe *next_spe = bp->spq_prod_bd; 3807 3808 if (bp->spq_prod_bd == bp->spq_last_bd) { 3809 bp->spq_prod_bd = bp->spq; 3810 bp->spq_prod_idx = 0; 3811 DP(BNX2X_MSG_SP, "end of spq\n"); 3812 } else { 3813 bp->spq_prod_bd++; 3814 bp->spq_prod_idx++; 3815 } 3816 return next_spe; 3817 } 3818 3819 /* must be called under the spq lock */ 3820 static void bnx2x_sp_prod_update(struct bnx2x *bp) 3821 { 3822 int func = BP_FUNC(bp); 3823 3824 /* 3825 * Make sure that BD data is updated before writing the producer: 3826 * BD data is written to the memory, the producer is read from the 3827 * memory, thus we need a full memory barrier to ensure the ordering. 
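	 *
	 * Concretely: if the new producer value became visible to the chip
	 * before the BD contents did, the chip could fetch a stale or
	 * partially written BD for that producer; the full barrier below
	 * rules that ordering out.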
3828 */ 3829 mb(); 3830 3831 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 3832 bp->spq_prod_idx); 3833 } 3834 3835 /** 3836 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ 3837 * 3838 * @cmd: command to check 3839 * @cmd_type: command type 3840 */ 3841 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 3842 { 3843 if ((cmd_type == NONE_CONNECTION_TYPE) || 3844 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 3845 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 3846 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 3847 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 3848 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 3849 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) 3850 return true; 3851 else 3852 return false; 3853 } 3854 3855 /** 3856 * bnx2x_sp_post - place a single command on an SP ring 3857 * 3858 * @bp: driver handle 3859 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 3860 * @cid: SW CID the command is related to 3861 * @data_hi: command private data address (high 32 bits) 3862 * @data_lo: command private data address (low 32 bits) 3863 * @cmd_type: command type (e.g. NONE, ETH) 3864 * 3865 * SP data is handled as if it's always an address pair, thus data fields are 3866 * not swapped to little endian in upper functions. Instead this function swaps 3867 * data as if it's two u32 fields. 3868 */ 3869 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 3870 u32 data_hi, u32 data_lo, int cmd_type) 3871 { 3872 struct eth_spe *spe; 3873 u16 type; 3874 bool common = bnx2x_is_contextless_ramrod(command, cmd_type); 3875 3876 #ifdef BNX2X_STOP_ON_ERROR 3877 if (unlikely(bp->panic)) { 3878 BNX2X_ERR("Can't post SP when there is panic\n"); 3879 return -EIO; 3880 } 3881 #endif 3882 3883 spin_lock_bh(&bp->spq_lock); 3884 3885 if (common) { 3886 if (!atomic_read(&bp->eq_spq_left)) { 3887 BNX2X_ERR("BUG! EQ ring full!\n"); 3888 spin_unlock_bh(&bp->spq_lock); 3889 bnx2x_panic(); 3890 return -EBUSY; 3891 } 3892 } else if (!atomic_read(&bp->cq_spq_left)) { 3893 BNX2X_ERR("BUG! SPQ ring full!\n"); 3894 spin_unlock_bh(&bp->spq_lock); 3895 bnx2x_panic(); 3896 return -EBUSY; 3897 } 3898 3899 spe = bnx2x_sp_get_next(bp); 3900 3901 /* CID needs port number to be encoded int it */ 3902 spe->hdr.conn_and_cmd_data = 3903 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 3904 HW_CID(bp, cid)); 3905 3906 /* In some cases, type may already contain the func-id 3907 * mainly in SRIOV related use cases, so we add it here only 3908 * if it's not already set. 3909 */ 3910 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { 3911 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & 3912 SPE_HDR_CONN_TYPE; 3913 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & 3914 SPE_HDR_FUNCTION_ID); 3915 } else { 3916 type = cmd_type; 3917 } 3918 3919 spe->hdr.type = cpu_to_le16(type); 3920 3921 spe->data.update_data_addr.hi = cpu_to_le32(data_hi); 3922 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 3923 3924 /* 3925 * It's ok if the actual decrement is issued towards the memory 3926 * somewhere between the spin_lock and spin_unlock. Thus no 3927 * more explicit memory barrier is needed. 3928 */ 3929 if (common) 3930 atomic_dec(&bp->eq_spq_left); 3931 else 3932 atomic_dec(&bp->cq_spq_left); 3933 3934 DP(BNX2X_MSG_SP, 3935 "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", 3936 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 3937 (u32)(U64_LO(bp->spq_mapping) + 3938 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, 3939 HW_CID(bp, cid), data_hi, data_lo, type, 3940 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); 3941 3942 bnx2x_sp_prod_update(bp); 3943 spin_unlock_bh(&bp->spq_lock); 3944 return 0; 3945 } 3946 3947 /* acquire split MCP access lock register */ 3948 static int bnx2x_acquire_alr(struct bnx2x *bp) 3949 { 3950 u32 j, val; 3951 int rc = 0; 3952 3953 might_sleep(); 3954 for (j = 0; j < 1000; j++) { 3955 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); 3956 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); 3957 if (val & MCPR_ACCESS_LOCK_LOCK) 3958 break; 3959 3960 usleep_range(5000, 10000); 3961 } 3962 if (!(val & MCPR_ACCESS_LOCK_LOCK)) { 3963 BNX2X_ERR("Cannot acquire MCP access lock register\n"); 3964 rc = -EBUSY; 3965 } 3966 3967 return rc; 3968 } 3969 3970 /* release split MCP access lock register */ 3971 static void bnx2x_release_alr(struct bnx2x *bp) 3972 { 3973 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 3974 } 3975 3976 #define BNX2X_DEF_SB_ATT_IDX 0x0001 3977 #define BNX2X_DEF_SB_IDX 0x0002 3978 3979 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 3980 { 3981 struct host_sp_status_block *def_sb = bp->def_status_blk; 3982 u16 rc = 0; 3983 3984 barrier(); /* status block is written to by the chip */ 3985 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 3986 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 3987 rc |= BNX2X_DEF_SB_ATT_IDX; 3988 } 3989 3990 if (bp->def_idx != def_sb->sp_sb.running_index) { 3991 bp->def_idx = def_sb->sp_sb.running_index; 3992 rc |= BNX2X_DEF_SB_IDX; 3993 } 3994 3995 /* Do not reorder: indices reading should complete before handling */ 3996 barrier(); 3997 return rc; 3998 } 3999 4000 /* 4001 * slow path service functions 4002 */ 4003 4004 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 4005 { 4006 int port = BP_PORT(bp); 4007 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4008 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4009 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 4010 NIG_REG_MASK_INTERRUPT_PORT0; 4011 u32 aeu_mask; 4012 u32 nig_mask = 0; 4013 u32 reg_addr; 4014 4015 if (bp->attn_state & asserted) 4016 BNX2X_ERR("IGU ERROR\n"); 4017 4018 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4019 aeu_mask = REG_RD(bp, aeu_addr); 4020 4021 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 4022 aeu_mask, asserted); 4023 aeu_mask &= ~(asserted & 0x3ff); 4024 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 4025 4026 REG_WR(bp, aeu_addr, aeu_mask); 4027 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4028 4029 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 4030 bp->attn_state |= asserted; 4031 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 4032 4033 if (asserted & ATTN_HARD_WIRED_MASK) { 4034 if (asserted & ATTN_NIG_FOR_FUNC) { 4035 4036 bnx2x_acquire_phy_lock(bp); 4037 4038 /* save nig interrupt mask */ 4039 nig_mask = REG_RD(bp, nig_int_mask_addr); 4040 4041 /* If nig_mask is not set, no need to call the update 4042 * function. 4043 */ 4044 if (nig_mask) { 4045 REG_WR(bp, nig_int_mask_addr, 0); 4046 4047 bnx2x_link_attn(bp); 4048 } 4049 4050 /* handle unicore attn? 
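		 *
		 * (Aside - a hedged usage sketch for bnx2x_sp_post() above,
		 * with hypothetical "cid" and a DMA-coherent buffer mapped at
		 * "mapping" that the caller has already filled:
		 *
		 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
		 *			   cid, U64_HI(mapping),
		 *			   U64_LO(mapping),
		 *			   ETH_CONNECTION_TYPE);
		 *
		 * The buffer address is passed as two u32 halves, matching
		 * the data_hi/data_lo handling described in the kernel-doc.)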
*/ 4051 } 4052 if (asserted & ATTN_SW_TIMER_4_FUNC) 4053 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); 4054 4055 if (asserted & GPIO_2_FUNC) 4056 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); 4057 4058 if (asserted & GPIO_3_FUNC) 4059 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); 4060 4061 if (asserted & GPIO_4_FUNC) 4062 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); 4063 4064 if (port == 0) { 4065 if (asserted & ATTN_GENERAL_ATTN_1) { 4066 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); 4067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 4068 } 4069 if (asserted & ATTN_GENERAL_ATTN_2) { 4070 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); 4071 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 4072 } 4073 if (asserted & ATTN_GENERAL_ATTN_3) { 4074 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); 4075 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 4076 } 4077 } else { 4078 if (asserted & ATTN_GENERAL_ATTN_4) { 4079 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); 4080 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 4081 } 4082 if (asserted & ATTN_GENERAL_ATTN_5) { 4083 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); 4084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 4085 } 4086 if (asserted & ATTN_GENERAL_ATTN_6) { 4087 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); 4088 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 4089 } 4090 } 4091 4092 } /* if hardwired */ 4093 4094 if (bp->common.int_block == INT_BLOCK_HC) 4095 reg_addr = (HC_REG_COMMAND_REG + port*32 + 4096 COMMAND_REG_ATTN_BITS_SET); 4097 else 4098 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 4099 4100 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, 4101 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 4102 REG_WR(bp, reg_addr, asserted); 4103 4104 /* now set back the mask */ 4105 if (asserted & ATTN_NIG_FOR_FUNC) { 4106 /* Verify that IGU ack through BAR was written before restoring 4107 * NIG mask. This loop should exit after 2-3 iterations max. 4108 */ 4109 if (bp->common.int_block != INT_BLOCK_HC) { 4110 u32 cnt = 0, igu_acked; 4111 do { 4112 igu_acked = REG_RD(bp, 4113 IGU_REG_ATTENTION_ACK_BITS); 4114 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 4115 (++cnt < MAX_IGU_ATTN_ACK_TO)); 4116 if (!igu_acked) 4117 DP(NETIF_MSG_HW, 4118 "Failed to verify IGU ack on time\n"); 4119 barrier(); 4120 } 4121 REG_WR(bp, nig_int_mask_addr, nig_mask); 4122 bnx2x_release_phy_lock(bp); 4123 } 4124 } 4125 4126 static void bnx2x_fan_failure(struct bnx2x *bp) 4127 { 4128 int port = BP_PORT(bp); 4129 u32 ext_phy_config; 4130 /* mark the failure */ 4131 ext_phy_config = 4132 SHMEM_RD(bp, 4133 dev_info.port_hw_config[port].external_phy_config); 4134 4135 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 4136 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 4137 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, 4138 ext_phy_config); 4139 4140 /* log the failure */ 4141 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" 4142 "Please contact OEM Support for assistance\n"); 4143 4144 /* Schedule device reset (unload) 4145 * This is due to some boards consuming sufficient power when driver is 4146 * up to overheat if fan fails. 4147 */ 4148 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); 4149 } 4150 4151 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 4152 { 4153 int port = BP_PORT(bp); 4154 int reg_offset; 4155 u32 val; 4156 4157 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4158 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4159 4160 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 4161 4162 val = REG_RD(bp, reg_offset); 4163 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 4164 REG_WR(bp, reg_offset, val); 4165 4166 BNX2X_ERR("SPIO5 hw attention\n"); 4167 4168 /* Fan failure attention */ 4169 bnx2x_hw_reset_phy(&bp->link_params); 4170 bnx2x_fan_failure(bp); 4171 } 4172 4173 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 4174 bnx2x_acquire_phy_lock(bp); 4175 bnx2x_handle_module_detect_int(&bp->link_params); 4176 bnx2x_release_phy_lock(bp); 4177 } 4178 4179 if (attn & HW_INTERRUPT_ASSERT_SET_0) { 4180 4181 val = REG_RD(bp, reg_offset); 4182 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0); 4183 REG_WR(bp, reg_offset, val); 4184 4185 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 4186 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0)); 4187 bnx2x_panic(); 4188 } 4189 } 4190 4191 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 4192 { 4193 u32 val; 4194 4195 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 4196 4197 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 4198 BNX2X_ERR("DB hw attention 0x%x\n", val); 4199 /* DORQ discard attention */ 4200 if (val & 0x2) 4201 BNX2X_ERR("FATAL error from DORQ\n"); 4202 } 4203 4204 if (attn & HW_INTERRUPT_ASSERT_SET_1) { 4205 4206 int port = BP_PORT(bp); 4207 int reg_offset; 4208 4209 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 4210 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4211 4212 val = REG_RD(bp, reg_offset); 4213 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1); 4214 REG_WR(bp, reg_offset, val); 4215 4216 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 4217 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1)); 4218 bnx2x_panic(); 4219 } 4220 } 4221 4222 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 4223 { 4224 u32 val; 4225 4226 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 4227 4228 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 4229 BNX2X_ERR("CFC hw attention 0x%x\n", val); 4230 /* CFC error attention */ 4231 if (val & 0x2) 4232 BNX2X_ERR("FATAL error from CFC\n"); 4233 } 4234 4235 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 4236 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 4237 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 4238 /* RQ_USDMDP_FIFO_OVERFLOW */ 4239 if (val & 0x18000) 4240 BNX2X_ERR("FATAL error from PXP\n"); 4241 4242 if (!CHIP_IS_E1x(bp)) { 4243 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 4244 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 4245 } 4246 } 4247 4248 if (attn & HW_INTERRUPT_ASSERT_SET_2) { 4249 4250 int port = BP_PORT(bp); 4251 int reg_offset; 4252 4253 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 4254 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4255 4256 val = REG_RD(bp, reg_offset); 4257 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2); 4258 REG_WR(bp, reg_offset, val); 4259 4260 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 4261 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2)); 4262 bnx2x_panic(); 4263 } 4264 } 4265 4266 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 4267 { 4268 u32 val; 4269 4270 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 4271 4272 if (attn & BNX2X_PMF_LINK_ASSERT) { 4273 int func = BP_FUNC(bp); 4274 4275 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 4276 bnx2x_read_mf_cfg(bp); 4277 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 4278 func_mf_config[BP_ABS_FUNC(bp)].config); 4279 val = SHMEM_RD(bp, 4280 func_mb[BP_FW_MB_IDX(bp)].drv_status); 4281 4282 if (val & (DRV_STATUS_DCC_EVENT_MASK | 4283 DRV_STATUS_OEM_EVENT_MASK)) 4284 bnx2x_oem_event(bp, 4285 (val & (DRV_STATUS_DCC_EVENT_MASK | 4286 DRV_STATUS_OEM_EVENT_MASK))); 4287 4288 if (val & DRV_STATUS_SET_MF_BW) 4289 bnx2x_set_mf_bw(bp); 4290 4291 if (val & DRV_STATUS_DRV_INFO_REQ) 4292 bnx2x_handle_drv_info_req(bp); 4293 4294 if (val & DRV_STATUS_VF_DISABLED) 4295 bnx2x_schedule_iov_task(bp, 4296 BNX2X_IOV_HANDLE_FLR); 4297 4298 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 4299 bnx2x_pmf_update(bp); 4300 4301 if (bp->port.pmf && 4302 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 4303 bp->dcbx_enabled > 0) 4304 /* start dcbx state machine */ 4305 bnx2x_dcbx_set_params(bp, 4306 BNX2X_DCBX_STATE_NEG_RECEIVED); 4307 if (val & DRV_STATUS_AFEX_EVENT_MASK) 4308 bnx2x_handle_afex_cmd(bp, 4309 val & DRV_STATUS_AFEX_EVENT_MASK); 4310 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 4311 bnx2x_handle_eee_event(bp); 4312 4313 if (val & DRV_STATUS_OEM_UPDATE_SVID) 4314 bnx2x_schedule_sp_rtnl(bp, 4315 BNX2X_SP_RTNL_UPDATE_SVID, 0); 4316 4317 if (bp->link_vars.periodic_flags & 4318 PERIODIC_FLAGS_LINK_EVENT) { 4319 /* sync with link */ 4320 bnx2x_acquire_phy_lock(bp); 4321 bp->link_vars.periodic_flags &= 4322 ~PERIODIC_FLAGS_LINK_EVENT; 4323 bnx2x_release_phy_lock(bp); 4324 if (IS_MF(bp)) 4325 bnx2x_link_sync_notify(bp); 4326 bnx2x_link_report(bp); 4327 } 4328 /* Always call it here: bnx2x_link_report() will 4329 * prevent the link indication duplication. 4330 */ 4331 bnx2x__link_status_update(bp); 4332 } else if (attn & BNX2X_MC_ASSERT_BITS) { 4333 4334 BNX2X_ERR("MC assert!\n"); 4335 bnx2x_mc_assert(bp); 4336 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); 4337 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); 4338 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); 4339 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); 4340 bnx2x_panic(); 4341 4342 } else if (attn & BNX2X_MCP_ASSERT) { 4343 4344 BNX2X_ERR("MCP assert!\n"); 4345 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); 4346 bnx2x_fw_dump(bp); 4347 4348 } else 4349 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); 4350 } 4351 4352 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 4353 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 4354 if (attn & BNX2X_GRC_TIMEOUT) { 4355 val = CHIP_IS_E1(bp) ? 0 : 4356 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); 4357 BNX2X_ERR("GRC time-out 0x%08x\n", val); 4358 } 4359 if (attn & BNX2X_GRC_RSV) { 4360 val = CHIP_IS_E1(bp) ? 0 : 4361 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); 4362 BNX2X_ERR("GRC reserved 0x%08x\n", val); 4363 } 4364 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 4365 } 4366 } 4367 4368 /* 4369 * Bits map: 4370 * 0-7 - Engine0 load counter. 4371 * 8-15 - Engine1 load counter. 
 * 16 - Engine0 RESET_IN_PROGRESS bit.
 * 17 - Engine1 RESET_IN_PROGRESS bit.
 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
 *      on the engine
 * 19 - Engine1 ONE_IS_LOADED.
 * 20 - Chip reset flow bit. When set, a non-leader must wait for the leaders
 *      of both engines to complete (i.e. check both RESET_IN_PROGRESS bits,
 *      not just the one belonging to its engine).
 *
 */
#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1

#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
#define BNX2X_GLOBAL_RESET_BIT		0x00040000

/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
void bnx2x_set_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_clear_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the GLOBAL_RESET bit.
 *
 * should be run under rtnl lock
 */
static bool bnx2x_reset_is_global(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
}

/*
 * Clear RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Clear the bit */
	val &= ~bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Set RESET_IN_PROGRESS for the current engine.
 *
 * should be run under rtnl lock
 */
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Set the bit */
	val |= bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the RESET_IN_PROGRESS bit for the given engine.
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	u32 bit = engine ?
4480 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4481 4482 /* return false if bit is set */ 4483 return (val & bit) ? false : true; 4484 } 4485 4486 /* 4487 * set pf load for the current pf. 4488 * 4489 * should be run under rtnl lock 4490 */ 4491 void bnx2x_set_pf_load(struct bnx2x *bp) 4492 { 4493 u32 val1, val; 4494 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4495 BNX2X_PATH0_LOAD_CNT_MASK; 4496 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4497 BNX2X_PATH0_LOAD_CNT_SHIFT; 4498 4499 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4500 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4501 4502 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 4503 4504 /* get the current counter value */ 4505 val1 = (val & mask) >> shift; 4506 4507 /* set bit of that PF */ 4508 val1 |= (1 << bp->pf_num); 4509 4510 /* clear the old value */ 4511 val &= ~mask; 4512 4513 /* set the new one */ 4514 val |= ((val1 << shift) & mask); 4515 4516 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4517 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4518 } 4519 4520 /** 4521 * bnx2x_clear_pf_load - clear pf load mark 4522 * 4523 * @bp: driver handle 4524 * 4525 * Should be run under rtnl lock. 4526 * Decrements the load counter for the current engine. Returns 4527 * whether other functions are still loaded 4528 */ 4529 bool bnx2x_clear_pf_load(struct bnx2x *bp) 4530 { 4531 u32 val1, val; 4532 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4533 BNX2X_PATH0_LOAD_CNT_MASK; 4534 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4535 BNX2X_PATH0_LOAD_CNT_SHIFT; 4536 4537 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4538 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4539 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 4540 4541 /* get the current counter value */ 4542 val1 = (val & mask) >> shift; 4543 4544 /* clear bit of that PF */ 4545 val1 &= ~(1 << bp->pf_num); 4546 4547 /* clear the old value */ 4548 val &= ~mask; 4549 4550 /* set the new one */ 4551 val |= ((val1 << shift) & mask); 4552 4553 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4554 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4555 return val1 != 0; 4556 } 4557 4558 /* 4559 * Read the load status for the current engine. 4560 * 4561 * should be run under rtnl lock 4562 */ 4563 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 4564 { 4565 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 4566 BNX2X_PATH0_LOAD_CNT_MASK); 4567 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4568 BNX2X_PATH0_LOAD_CNT_SHIFT); 4569 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4570 4571 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4572 4573 val = (val & mask) >> shift; 4574 4575 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4576 engine, val); 4577 4578 return val != 0; 4579 } 4580 4581 static void _print_parity(struct bnx2x *bp, u32 reg) 4582 { 4583 pr_cont(" [0x%08x] ", REG_RD(bp, reg)); 4584 } 4585 4586 static void _print_next_block(int idx, const char *blk) 4587 { 4588 pr_cont("%s%s", idx ? ", " : "", blk); 4589 } 4590 4591 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4592 int *par_num, bool print) 4593 { 4594 u32 cur_bit; 4595 bool res; 4596 int i; 4597 4598 res = false; 4599 4600 for (i = 0; sig; i++) { 4601 cur_bit = (0x1UL << i); 4602 if (sig & cur_bit) { 4603 res |= true; /* Each bit is real error! 
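			 *
			 * (Aside - worked example for bnx2x_set_pf_load()
			 * above, assuming hypothetical values
			 * BP_PATH(bp) == 1 and bp->pf_num == 2:
			 * mask == 0x0000ff00 and shift == 8; with an old
			 * register value of 0x00000a00 the per-path counter
			 * is 0x0a, setting bit 2 gives 0x0e, and the register
			 * is written back as 0x00000e00.
			 * bnx2x_clear_pf_load() reverses exactly that step.)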
*/ 4604 4605 if (print) { 4606 switch (cur_bit) { 4607 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4608 _print_next_block((*par_num)++, "BRB"); 4609 _print_parity(bp, 4610 BRB1_REG_BRB1_PRTY_STS); 4611 break; 4612 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4613 _print_next_block((*par_num)++, 4614 "PARSER"); 4615 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4616 break; 4617 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4618 _print_next_block((*par_num)++, "TSDM"); 4619 _print_parity(bp, 4620 TSDM_REG_TSDM_PRTY_STS); 4621 break; 4622 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4623 _print_next_block((*par_num)++, 4624 "SEARCHER"); 4625 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4626 break; 4627 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4628 _print_next_block((*par_num)++, "TCM"); 4629 _print_parity(bp, TCM_REG_TCM_PRTY_STS); 4630 break; 4631 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4632 _print_next_block((*par_num)++, 4633 "TSEMI"); 4634 _print_parity(bp, 4635 TSEM_REG_TSEM_PRTY_STS_0); 4636 _print_parity(bp, 4637 TSEM_REG_TSEM_PRTY_STS_1); 4638 break; 4639 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4640 _print_next_block((*par_num)++, "XPB"); 4641 _print_parity(bp, GRCBASE_XPB + 4642 PB_REG_PB_PRTY_STS); 4643 break; 4644 } 4645 } 4646 4647 /* Clear the bit */ 4648 sig &= ~cur_bit; 4649 } 4650 } 4651 4652 return res; 4653 } 4654 4655 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4656 int *par_num, bool *global, 4657 bool print) 4658 { 4659 u32 cur_bit; 4660 bool res; 4661 int i; 4662 4663 res = false; 4664 4665 for (i = 0; sig; i++) { 4666 cur_bit = (0x1UL << i); 4667 if (sig & cur_bit) { 4668 res |= true; /* Each bit is real error! */ 4669 switch (cur_bit) { 4670 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4671 if (print) { 4672 _print_next_block((*par_num)++, "PBF"); 4673 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4674 } 4675 break; 4676 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4677 if (print) { 4678 _print_next_block((*par_num)++, "QM"); 4679 _print_parity(bp, QM_REG_QM_PRTY_STS); 4680 } 4681 break; 4682 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4683 if (print) { 4684 _print_next_block((*par_num)++, "TM"); 4685 _print_parity(bp, TM_REG_TM_PRTY_STS); 4686 } 4687 break; 4688 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4689 if (print) { 4690 _print_next_block((*par_num)++, "XSDM"); 4691 _print_parity(bp, 4692 XSDM_REG_XSDM_PRTY_STS); 4693 } 4694 break; 4695 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4696 if (print) { 4697 _print_next_block((*par_num)++, "XCM"); 4698 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4699 } 4700 break; 4701 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4702 if (print) { 4703 _print_next_block((*par_num)++, 4704 "XSEMI"); 4705 _print_parity(bp, 4706 XSEM_REG_XSEM_PRTY_STS_0); 4707 _print_parity(bp, 4708 XSEM_REG_XSEM_PRTY_STS_1); 4709 } 4710 break; 4711 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4712 if (print) { 4713 _print_next_block((*par_num)++, 4714 "DOORBELLQ"); 4715 _print_parity(bp, 4716 DORQ_REG_DORQ_PRTY_STS); 4717 } 4718 break; 4719 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4720 if (print) { 4721 _print_next_block((*par_num)++, "NIG"); 4722 if (CHIP_IS_E1x(bp)) { 4723 _print_parity(bp, 4724 NIG_REG_NIG_PRTY_STS); 4725 } else { 4726 _print_parity(bp, 4727 NIG_REG_NIG_PRTY_STS_0); 4728 _print_parity(bp, 4729 NIG_REG_NIG_PRTY_STS_1); 4730 } 4731 } 4732 break; 4733 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4734 if (print) 4735 _print_next_block((*par_num)++, 4736 "VAUX PCI CORE"); 4737 *global = 
true; 4738 break; 4739 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4740 if (print) { 4741 _print_next_block((*par_num)++, 4742 "DEBUG"); 4743 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4744 } 4745 break; 4746 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4747 if (print) { 4748 _print_next_block((*par_num)++, "USDM"); 4749 _print_parity(bp, 4750 USDM_REG_USDM_PRTY_STS); 4751 } 4752 break; 4753 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4754 if (print) { 4755 _print_next_block((*par_num)++, "UCM"); 4756 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4757 } 4758 break; 4759 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4760 if (print) { 4761 _print_next_block((*par_num)++, 4762 "USEMI"); 4763 _print_parity(bp, 4764 USEM_REG_USEM_PRTY_STS_0); 4765 _print_parity(bp, 4766 USEM_REG_USEM_PRTY_STS_1); 4767 } 4768 break; 4769 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4770 if (print) { 4771 _print_next_block((*par_num)++, "UPB"); 4772 _print_parity(bp, GRCBASE_UPB + 4773 PB_REG_PB_PRTY_STS); 4774 } 4775 break; 4776 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4777 if (print) { 4778 _print_next_block((*par_num)++, "CSDM"); 4779 _print_parity(bp, 4780 CSDM_REG_CSDM_PRTY_STS); 4781 } 4782 break; 4783 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4784 if (print) { 4785 _print_next_block((*par_num)++, "CCM"); 4786 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4787 } 4788 break; 4789 } 4790 4791 /* Clear the bit */ 4792 sig &= ~cur_bit; 4793 } 4794 } 4795 4796 return res; 4797 } 4798 4799 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4800 int *par_num, bool print) 4801 { 4802 u32 cur_bit; 4803 bool res; 4804 int i; 4805 4806 res = false; 4807 4808 for (i = 0; sig; i++) { 4809 cur_bit = (0x1UL << i); 4810 if (sig & cur_bit) { 4811 res = true; /* Each bit is real error! 
*/ 4812 if (print) { 4813 switch (cur_bit) { 4814 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4815 _print_next_block((*par_num)++, 4816 "CSEMI"); 4817 _print_parity(bp, 4818 CSEM_REG_CSEM_PRTY_STS_0); 4819 _print_parity(bp, 4820 CSEM_REG_CSEM_PRTY_STS_1); 4821 break; 4822 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4823 _print_next_block((*par_num)++, "PXP"); 4824 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4825 _print_parity(bp, 4826 PXP2_REG_PXP2_PRTY_STS_0); 4827 _print_parity(bp, 4828 PXP2_REG_PXP2_PRTY_STS_1); 4829 break; 4830 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4831 _print_next_block((*par_num)++, 4832 "PXPPCICLOCKCLIENT"); 4833 break; 4834 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4835 _print_next_block((*par_num)++, "CFC"); 4836 _print_parity(bp, 4837 CFC_REG_CFC_PRTY_STS); 4838 break; 4839 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4840 _print_next_block((*par_num)++, "CDU"); 4841 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4842 break; 4843 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4844 _print_next_block((*par_num)++, "DMAE"); 4845 _print_parity(bp, 4846 DMAE_REG_DMAE_PRTY_STS); 4847 break; 4848 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4849 _print_next_block((*par_num)++, "IGU"); 4850 if (CHIP_IS_E1x(bp)) 4851 _print_parity(bp, 4852 HC_REG_HC_PRTY_STS); 4853 else 4854 _print_parity(bp, 4855 IGU_REG_IGU_PRTY_STS); 4856 break; 4857 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4858 _print_next_block((*par_num)++, "MISC"); 4859 _print_parity(bp, 4860 MISC_REG_MISC_PRTY_STS); 4861 break; 4862 } 4863 } 4864 4865 /* Clear the bit */ 4866 sig &= ~cur_bit; 4867 } 4868 } 4869 4870 return res; 4871 } 4872 4873 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, 4874 int *par_num, bool *global, 4875 bool print) 4876 { 4877 bool res = false; 4878 u32 cur_bit; 4879 int i; 4880 4881 for (i = 0; sig; i++) { 4882 cur_bit = (0x1UL << i); 4883 if (sig & cur_bit) { 4884 switch (cur_bit) { 4885 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4886 if (print) 4887 _print_next_block((*par_num)++, 4888 "MCP ROM"); 4889 *global = true; 4890 res = true; 4891 break; 4892 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4893 if (print) 4894 _print_next_block((*par_num)++, 4895 "MCP UMP RX"); 4896 *global = true; 4897 res = true; 4898 break; 4899 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4900 if (print) 4901 _print_next_block((*par_num)++, 4902 "MCP UMP TX"); 4903 *global = true; 4904 res = true; 4905 break; 4906 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4907 (*par_num)++; 4908 /* clear latched SCPAD PATIRY from MCP */ 4909 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 4910 1UL << 10); 4911 break; 4912 } 4913 4914 /* Clear the bit */ 4915 sig &= ~cur_bit; 4916 } 4917 } 4918 4919 return res; 4920 } 4921 4922 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4923 int *par_num, bool print) 4924 { 4925 u32 cur_bit; 4926 bool res; 4927 int i; 4928 4929 res = false; 4930 4931 for (i = 0; sig; i++) { 4932 cur_bit = (0x1UL << i); 4933 if (sig & cur_bit) { 4934 res = true; /* Each bit is real error! 
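			 *
			 * (Aside - every bnx2x_check_blocks_with_parityN()
			 * helper shares the same skeleton; a minimal sketch,
			 * with "describe_block" standing in for the per-bit
			 * switch used above:
			 *
			 *	for (i = 0; sig; i++) {
			 *		cur_bit = 0x1UL << i;
			 *		if (sig & cur_bit) {
			 *			res = true;
			 *			if (print)
			 *				describe_block(bp, cur_bit);
			 *			sig &= ~cur_bit;
			 *		}
			 *	}
			 * )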
*/ 4935 if (print) { 4936 switch (cur_bit) { 4937 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4938 _print_next_block((*par_num)++, 4939 "PGLUE_B"); 4940 _print_parity(bp, 4941 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4942 break; 4943 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4944 _print_next_block((*par_num)++, "ATC"); 4945 _print_parity(bp, 4946 ATC_REG_ATC_PRTY_STS); 4947 break; 4948 } 4949 } 4950 /* Clear the bit */ 4951 sig &= ~cur_bit; 4952 } 4953 } 4954 4955 return res; 4956 } 4957 4958 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4959 u32 *sig) 4960 { 4961 bool res = false; 4962 4963 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4964 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4965 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4966 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4967 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4968 int par_num = 0; 4969 4970 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4971 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4972 sig[0] & HW_PRTY_ASSERT_SET_0, 4973 sig[1] & HW_PRTY_ASSERT_SET_1, 4974 sig[2] & HW_PRTY_ASSERT_SET_2, 4975 sig[3] & HW_PRTY_ASSERT_SET_3, 4976 sig[4] & HW_PRTY_ASSERT_SET_4); 4977 if (print) { 4978 if (((sig[0] & HW_PRTY_ASSERT_SET_0) || 4979 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4980 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4981 (sig[4] & HW_PRTY_ASSERT_SET_4)) || 4982 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) { 4983 netdev_err(bp->dev, 4984 "Parity errors detected in blocks: "); 4985 } else { 4986 print = false; 4987 } 4988 } 4989 res |= bnx2x_check_blocks_with_parity0(bp, 4990 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); 4991 res |= bnx2x_check_blocks_with_parity1(bp, 4992 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); 4993 res |= bnx2x_check_blocks_with_parity2(bp, 4994 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); 4995 res |= bnx2x_check_blocks_with_parity3(bp, 4996 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); 4997 res |= bnx2x_check_blocks_with_parity4(bp, 4998 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); 4999 5000 if (print) 5001 pr_cont("\n"); 5002 } 5003 5004 return res; 5005 } 5006 5007 /** 5008 * bnx2x_chk_parity_attn - checks for parity attentions. 5009 * 5010 * @bp: driver handle 5011 * @global: true if there was a global attention 5012 * @print: show parity attention in syslog 5013 */ 5014 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 5015 { 5016 struct attn_route attn = { {0} }; 5017 int port = BP_PORT(bp); 5018 5019 attn.sig[0] = REG_RD(bp, 5020 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 5021 port*4); 5022 attn.sig[1] = REG_RD(bp, 5023 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 5024 port*4); 5025 attn.sig[2] = REG_RD(bp, 5026 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 5027 port*4); 5028 attn.sig[3] = REG_RD(bp, 5029 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 5030 port*4); 5031 /* Since MCP attentions can't be disabled inside the block, we need to 5032 * read AEU registers to see whether they're currently disabled 5033 */ 5034 attn.sig[3] &= ((REG_RD(bp, 5035 !port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 5036 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & 5037 MISC_AEU_ENABLE_MCP_PRTY_BITS) | 5038 ~MISC_AEU_ENABLE_MCP_PRTY_BITS); 5039 5040 if (!CHIP_IS_E1x(bp)) 5041 attn.sig[4] = REG_RD(bp, 5042 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 5043 port*4); 5044 5045 return bnx2x_parity_attn(bp, global, print, attn.sig); 5046 } 5047 5048 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 5049 { 5050 u32 val; 5051 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 5052 5053 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 5054 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 5055 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 5056 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 5057 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 5058 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 5059 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 5060 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 5061 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 5062 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 5063 if (val & 5064 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 5065 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 5066 if (val & 5067 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 5068 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 5069 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 5070 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 5071 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 5072 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 5073 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 5074 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 5075 } 5076 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 5077 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 5078 BNX2X_ERR("ATC hw attention 0x%x\n", val); 5079 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 5080 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 5081 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 5082 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 5083 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 5084 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 5085 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 5086 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 5087 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 5088 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 5089 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 5090 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 5091 } 5092 5093 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 5094 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 5095 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 5096 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 5097 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 5098 } 5099 } 5100 5101 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 5102 { 5103 struct attn_route attn, *group_mask; 5104 int port = BP_PORT(bp); 5105 int index; 5106 u32 reg_addr; 5107 u32 val; 5108 u32 aeu_mask; 5109 bool global = false; 5110 5111 /* need to take HW lock because MCP or other port might also 5112 try to handle this event */ 5113 bnx2x_acquire_alr(bp); 5114 5115 if (bnx2x_chk_parity_attn(bp, &global, true)) { 5116 #ifndef BNX2X_STOP_ON_ERROR 5117 bp->recovery_state = BNX2X_RECOVERY_INIT; 5118 
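		/* (Aside - worked example for the attn.sig[3] masking in
		 * bnx2x_chk_parity_attn() above, with hypothetical bit
		 * positions: if MISC_AEU_ENABLE_MCP_PRTY_BITS covered bits
		 * 28-31 and the AEU enable register had only bit 28 set,
		 * then (enable & MCP_PRTY_BITS) | ~MCP_PRTY_BITS keeps every
		 * non-MCP bit of sig[3] unchanged while clearing the disabled
		 * MCP parity bits 29-31, so masked-off MCP parity lines are
		 * not mistaken for live attentions.)
		 */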
schedule_delayed_work(&bp->sp_rtnl_task, 0); 5119 /* Disable HW interrupts */ 5120 bnx2x_int_disable(bp); 5121 /* In case of parity errors don't handle attentions so that 5122 * other function would "see" parity errors. 5123 */ 5124 #else 5125 bnx2x_panic(); 5126 #endif 5127 bnx2x_release_alr(bp); 5128 return; 5129 } 5130 5131 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 5132 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 5133 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 5134 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 5135 if (!CHIP_IS_E1x(bp)) 5136 attn.sig[4] = 5137 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 5138 else 5139 attn.sig[4] = 0; 5140 5141 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 5142 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 5143 5144 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5145 if (deasserted & (1 << index)) { 5146 group_mask = &bp->attn_group[index]; 5147 5148 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 5149 index, 5150 group_mask->sig[0], group_mask->sig[1], 5151 group_mask->sig[2], group_mask->sig[3], 5152 group_mask->sig[4]); 5153 5154 bnx2x_attn_int_deasserted4(bp, 5155 attn.sig[4] & group_mask->sig[4]); 5156 bnx2x_attn_int_deasserted3(bp, 5157 attn.sig[3] & group_mask->sig[3]); 5158 bnx2x_attn_int_deasserted1(bp, 5159 attn.sig[1] & group_mask->sig[1]); 5160 bnx2x_attn_int_deasserted2(bp, 5161 attn.sig[2] & group_mask->sig[2]); 5162 bnx2x_attn_int_deasserted0(bp, 5163 attn.sig[0] & group_mask->sig[0]); 5164 } 5165 } 5166 5167 bnx2x_release_alr(bp); 5168 5169 if (bp->common.int_block == INT_BLOCK_HC) 5170 reg_addr = (HC_REG_COMMAND_REG + port*32 + 5171 COMMAND_REG_ATTN_BITS_CLR); 5172 else 5173 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 5174 5175 val = ~deasserted; 5176 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 5177 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 5178 REG_WR(bp, reg_addr, val); 5179 5180 if (~bp->attn_state & deasserted) 5181 BNX2X_ERR("IGU ERROR\n"); 5182 5183 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 5184 MISC_REG_AEU_MASK_ATTN_FUNC_0; 5185 5186 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 5187 aeu_mask = REG_RD(bp, reg_addr); 5188 5189 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 5190 aeu_mask, deasserted); 5191 aeu_mask |= (deasserted & 0x3ff); 5192 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 5193 5194 REG_WR(bp, reg_addr, aeu_mask); 5195 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 5196 5197 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 5198 bp->attn_state &= ~deasserted; 5199 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 5200 } 5201 5202 static void bnx2x_attn_int(struct bnx2x *bp) 5203 { 5204 /* read local copy of bits */ 5205 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 5206 attn_bits); 5207 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 
5208 attn_bits_ack); 5209 u32 attn_state = bp->attn_state; 5210 5211 /* look for changed bits */ 5212 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 5213 u32 deasserted = ~attn_bits & attn_ack & attn_state; 5214 5215 DP(NETIF_MSG_HW, 5216 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 5217 attn_bits, attn_ack, asserted, deasserted); 5218 5219 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 5220 BNX2X_ERR("BAD attention state\n"); 5221 5222 /* handle bits that were raised */ 5223 if (asserted) 5224 bnx2x_attn_int_asserted(bp, asserted); 5225 5226 if (deasserted) 5227 bnx2x_attn_int_deasserted(bp, deasserted); 5228 } 5229 5230 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 5231 u16 index, u8 op, u8 update) 5232 { 5233 u32 igu_addr = bp->igu_base_addr; 5234 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 5235 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 5236 igu_addr); 5237 } 5238 5239 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 5240 { 5241 /* No memory barriers */ 5242 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 5243 } 5244 5245 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 5246 union event_ring_elem *elem) 5247 { 5248 u8 err = elem->message.error; 5249 5250 if (!bp->cnic_eth_dev.starting_cid || 5251 (cid < bp->cnic_eth_dev.starting_cid && 5252 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 5253 return 1; 5254 5255 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 5256 5257 if (unlikely(err)) { 5258 5259 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 5260 cid); 5261 bnx2x_panic_dump(bp, false); 5262 } 5263 bnx2x_cnic_cfc_comp(bp, cid, err); 5264 return 0; 5265 } 5266 5267 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 5268 { 5269 struct bnx2x_mcast_ramrod_params rparam; 5270 int rc; 5271 5272 memset(&rparam, 0, sizeof(rparam)); 5273 5274 rparam.mcast_obj = &bp->mcast_obj; 5275 5276 netif_addr_lock_bh(bp->dev); 5277 5278 /* Clear pending state for the last command */ 5279 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 5280 5281 /* If there are pending mcast commands - send them */ 5282 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 5283 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 5284 if (rc < 0) 5285 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 5286 rc); 5287 } 5288 5289 netif_addr_unlock_bh(bp->dev); 5290 } 5291 5292 static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 5293 union event_ring_elem *elem) 5294 { 5295 unsigned long ramrod_flags = 0; 5296 int rc = 0; 5297 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); 5298 u32 cid = echo & BNX2X_SWCID_MASK; 5299 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 5300 5301 /* Always push next commands out, don't wait here */ 5302 __set_bit(RAMROD_CONT, &ramrod_flags); 5303 5304 switch (echo >> BNX2X_SWCID_SHIFT) { 5305 case BNX2X_FILTER_MAC_PENDING: 5306 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 5307 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) 5308 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 5309 else 5310 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 5311 5312 break; 5313 case BNX2X_FILTER_VLAN_PENDING: 5314 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n"); 5315 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; 5316 break; 5317 case BNX2X_FILTER_MCAST_PENDING: 5318 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 5319 /* This is only relevant for 57710 where multicast MACs are 5320 * configured as unicast MACs using the same ramrod. 
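		 *
		 * (Aside - the echo word decoded at the top of this function
		 * packs the filter type above BNX2X_SWCID_SHIFT and the SW
		 * CID below it; e.g. with a hypothetical echo of
		 * (BNX2X_FILTER_MAC_PENDING << BNX2X_SWCID_SHIFT) | 17,
		 * "echo >> BNX2X_SWCID_SHIFT" selects the MAC case and
		 * "echo & BNX2X_SWCID_MASK" yields cid 17.)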
5321 */ 5322 bnx2x_handle_mcast_eqe(bp); 5323 return; 5324 default: 5325 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); 5326 return; 5327 } 5328 5329 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 5330 5331 if (rc < 0) 5332 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 5333 else if (rc > 0) 5334 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 5335 } 5336 5337 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 5338 5339 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 5340 { 5341 netif_addr_lock_bh(bp->dev); 5342 5343 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 5344 5345 /* Send rx_mode command again if was requested */ 5346 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 5347 bnx2x_set_storm_rx_mode(bp); 5348 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 5349 &bp->sp_state)) 5350 bnx2x_set_iscsi_eth_rx_mode(bp, true); 5351 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 5352 &bp->sp_state)) 5353 bnx2x_set_iscsi_eth_rx_mode(bp, false); 5354 5355 netif_addr_unlock_bh(bp->dev); 5356 } 5357 5358 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 5359 union event_ring_elem *elem) 5360 { 5361 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 5362 DP(BNX2X_MSG_SP, 5363 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 5364 elem->message.data.vif_list_event.func_bit_map); 5365 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 5366 elem->message.data.vif_list_event.func_bit_map); 5367 } else if (elem->message.data.vif_list_event.echo == 5368 VIF_LIST_RULE_SET) { 5369 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 5370 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 5371 } 5372 } 5373 5374 /* called with rtnl_lock */ 5375 static void bnx2x_after_function_update(struct bnx2x *bp) 5376 { 5377 int q, rc; 5378 struct bnx2x_fastpath *fp; 5379 struct bnx2x_queue_state_params queue_params = {NULL}; 5380 struct bnx2x_queue_update_params *q_update_params = 5381 &queue_params.params.update; 5382 5383 /* Send Q update command with afex vlan removal values for all Qs */ 5384 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 5385 5386 /* set silent vlan removal values according to vlan mode */ 5387 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 5388 &q_update_params->update_flags); 5389 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 5390 &q_update_params->update_flags); 5391 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5392 5393 /* in access mode mark mask and value are 0 to strip all vlans */ 5394 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { 5395 q_update_params->silent_removal_value = 0; 5396 q_update_params->silent_removal_mask = 0; 5397 } else { 5398 q_update_params->silent_removal_value = 5399 (bp->afex_def_vlan_tag & VLAN_VID_MASK); 5400 q_update_params->silent_removal_mask = VLAN_VID_MASK; 5401 } 5402 5403 for_each_eth_queue(bp, q) { 5404 /* Set the appropriate Queue object */ 5405 fp = &bp->fp[q]; 5406 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5407 5408 /* send the ramrod */ 5409 rc = bnx2x_queue_state_change(bp, &queue_params); 5410 if (rc < 0) 5411 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5412 q); 5413 } 5414 5415 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { 5416 fp = &bp->fp[FCOE_IDX(bp)]; 5417 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5418 5419 /* clear pending completion bit */ 5420 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5421 5422 /* mark latest Q 
bit */ 5423 smp_mb__before_atomic(); 5424 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 5425 smp_mb__after_atomic(); 5426 5427 /* send Q update ramrod for FCoE Q */ 5428 rc = bnx2x_queue_state_change(bp, &queue_params); 5429 if (rc < 0) 5430 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5431 q); 5432 } else { 5433 /* If no FCoE ring - ACK MCP now */ 5434 bnx2x_link_report(bp); 5435 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5436 } 5437 } 5438 5439 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 5440 struct bnx2x *bp, u32 cid) 5441 { 5442 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 5443 5444 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp))) 5445 return &bnx2x_fcoe_sp_obj(bp, q_obj); 5446 else 5447 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; 5448 } 5449 5450 static void bnx2x_eq_int(struct bnx2x *bp) 5451 { 5452 u16 hw_cons, sw_cons, sw_prod; 5453 union event_ring_elem *elem; 5454 u8 echo; 5455 u32 cid; 5456 u8 opcode; 5457 int rc, spqe_cnt = 0; 5458 struct bnx2x_queue_sp_obj *q_obj; 5459 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; 5460 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; 5461 5462 hw_cons = le16_to_cpu(*bp->eq_cons_sb); 5463 5464 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. 5465 * when we get the next-page we need to adjust so the loop 5466 * condition below will be met. The next element is the size of a 5467 * regular element and hence incrementing by 1 5468 */ 5469 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) 5470 hw_cons++; 5471 5472 /* This function may never run in parallel with itself for a 5473 * specific bp, thus there is no need in "paired" read memory 5474 * barrier here. 5475 */ 5476 sw_cons = bp->eq_cons; 5477 sw_prod = bp->eq_prod; 5478 5479 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", 5480 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); 5481 5482 for (; sw_cons != hw_cons; 5483 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 5484 5485 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; 5486 5487 rc = bnx2x_iov_eq_sp_event(bp, elem); 5488 if (!rc) { 5489 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", 5490 rc); 5491 goto next_spqe; 5492 } 5493 5494 opcode = elem->message.opcode; 5495 5496 /* handle eq element */ 5497 switch (opcode) { 5498 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 5499 bnx2x_vf_mbx_schedule(bp, 5500 &elem->message.data.vf_pf_event); 5501 continue; 5502 5503 case EVENT_RING_OPCODE_STAT_QUERY: 5504 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), 5505 "got statistics comp event %d\n", 5506 bp->stats_comp++); 5507 /* nothing to do with stats comp */ 5508 goto next_spqe; 5509 5510 case EVENT_RING_OPCODE_CFC_DEL: 5511 /* handle according to cid range */ 5512 /* 5513 * we may want to verify here that the bp state is 5514 * HALTING 5515 */ 5516 5517 /* elem CID originates from FW; actually LE */ 5518 cid = SW_CID(elem->message.data.cfc_del_event.cid); 5519 5520 DP(BNX2X_MSG_SP, 5521 "got delete ramrod for MULTI[%d]\n", cid); 5522 5523 if (CNIC_LOADED(bp) && 5524 !bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 5525 goto next_spqe; 5526 5527 q_obj = bnx2x_cid_to_q_obj(bp, cid); 5528 5529 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 5530 break; 5531 5532 goto next_spqe; 5533 5534 case EVENT_RING_OPCODE_STOP_TRAFFIC: 5535 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 5536 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); 5537 if (f_obj->complete_cmd(bp, f_obj, 5538 BNX2X_F_CMD_TX_STOP)) 5539 break; 5540 goto 
next_spqe; 5541 5542 case EVENT_RING_OPCODE_START_TRAFFIC: 5543 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 5544 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 5545 if (f_obj->complete_cmd(bp, f_obj, 5546 BNX2X_F_CMD_TX_START)) 5547 break; 5548 goto next_spqe; 5549 5550 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 5551 echo = elem->message.data.function_update_event.echo; 5552 if (echo == SWITCH_UPDATE) { 5553 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5554 "got FUNC_SWITCH_UPDATE ramrod\n"); 5555 if (f_obj->complete_cmd( 5556 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) 5557 break; 5558 5559 } else { 5560 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; 5561 5562 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 5563 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 5564 f_obj->complete_cmd(bp, f_obj, 5565 BNX2X_F_CMD_AFEX_UPDATE); 5566 5567 /* We will perform the Queues update from 5568 * sp_rtnl task as all Queue SP operations 5569 * should run under rtnl_lock. 5570 */ 5571 bnx2x_schedule_sp_rtnl(bp, cmd, 0); 5572 } 5573 5574 goto next_spqe; 5575 5576 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 5577 f_obj->complete_cmd(bp, f_obj, 5578 BNX2X_F_CMD_AFEX_VIFLISTS); 5579 bnx2x_after_afex_vif_lists(bp, elem); 5580 goto next_spqe; 5581 case EVENT_RING_OPCODE_FUNCTION_START: 5582 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5583 "got FUNC_START ramrod\n"); 5584 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 5585 break; 5586 5587 goto next_spqe; 5588 5589 case EVENT_RING_OPCODE_FUNCTION_STOP: 5590 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5591 "got FUNC_STOP ramrod\n"); 5592 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 5593 break; 5594 5595 goto next_spqe; 5596 5597 case EVENT_RING_OPCODE_SET_TIMESYNC: 5598 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, 5599 "got set_timesync ramrod completion\n"); 5600 if (f_obj->complete_cmd(bp, f_obj, 5601 BNX2X_F_CMD_SET_TIMESYNC)) 5602 break; 5603 goto next_spqe; 5604 } 5605 5606 switch (opcode | bp->state) { 5607 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5608 BNX2X_STATE_OPEN): 5609 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5610 BNX2X_STATE_OPENING_WAIT4_PORT): 5611 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5612 BNX2X_STATE_CLOSING_WAIT4_HALT): 5613 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", 5614 SW_CID(elem->message.data.eth_event.echo)); 5615 rss_raw->clear_pending(rss_raw); 5616 break; 5617 5618 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 5619 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 5620 case (EVENT_RING_OPCODE_SET_MAC | 5621 BNX2X_STATE_CLOSING_WAIT4_HALT): 5622 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5623 BNX2X_STATE_OPEN): 5624 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5625 BNX2X_STATE_DIAG): 5626 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5627 BNX2X_STATE_CLOSING_WAIT4_HALT): 5628 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n"); 5629 bnx2x_handle_classification_eqe(bp, elem); 5630 break; 5631 5632 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5633 BNX2X_STATE_OPEN): 5634 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5635 BNX2X_STATE_DIAG): 5636 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5637 BNX2X_STATE_CLOSING_WAIT4_HALT): 5638 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); 5639 bnx2x_handle_mcast_eqe(bp); 5640 break; 5641 5642 case (EVENT_RING_OPCODE_FILTERS_RULES | 5643 BNX2X_STATE_OPEN): 5644 case (EVENT_RING_OPCODE_FILTERS_RULES | 5645 BNX2X_STATE_DIAG): 5646 case (EVENT_RING_OPCODE_FILTERS_RULES | 5647 BNX2X_STATE_CLOSING_WAIT4_HALT): 5648 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); 5649 bnx2x_handle_rx_mode_eqe(bp); 5650 break; 5651 default: 5652 /* unknown event log error and continue */ 5653 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", 5654 elem->message.opcode, bp->state); 5655 } 5656 next_spqe: 5657 spqe_cnt++; 5658 } /* for */ 5659 5660 smp_mb__before_atomic(); 5661 atomic_add(spqe_cnt, &bp->eq_spq_left); 5662 5663 bp->eq_cons = sw_cons; 5664 bp->eq_prod = sw_prod; 5665 /* Make sure that above mem writes were issued towards the memory */ 5666 smp_wmb(); 5667 5668 /* update producer */ 5669 bnx2x_update_eq_prod(bp, bp->eq_prod); 5670 } 5671 5672 static void bnx2x_sp_task(struct work_struct *work) 5673 { 5674 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 5675 5676 DP(BNX2X_MSG_SP, "sp task invoked\n"); 5677 5678 /* make sure the atomic interrupt_occurred has been written */ 5679 smp_rmb(); 5680 if (atomic_read(&bp->interrupt_occurred)) { 5681 5682 /* what work needs to be performed? */ 5683 u16 status = bnx2x_update_dsb_idx(bp); 5684 5685 DP(BNX2X_MSG_SP, "status %x\n", status); 5686 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); 5687 atomic_set(&bp->interrupt_occurred, 0); 5688 5689 /* HW attentions */ 5690 if (status & BNX2X_DEF_SB_ATT_IDX) { 5691 bnx2x_attn_int(bp); 5692 status &= ~BNX2X_DEF_SB_ATT_IDX; 5693 } 5694 5695 /* SP events: STAT_QUERY and others */ 5696 if (status & BNX2X_DEF_SB_IDX) { 5697 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5698 5699 if (FCOE_INIT(bp) && 5700 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5701 /* Prevent local bottom-halves from running as 5702 * we are going to change the local NAPI list. 5703 */ 5704 local_bh_disable(); 5705 napi_schedule(&bnx2x_fcoe(bp, napi)); 5706 local_bh_enable(); 5707 } 5708 5709 /* Handle EQ completions */ 5710 bnx2x_eq_int(bp); 5711 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 5712 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); 5713 5714 status &= ~BNX2X_DEF_SB_IDX; 5715 } 5716 5717 /* if status is non zero then perhaps something went wrong */ 5718 if (unlikely(status)) 5719 DP(BNX2X_MSG_SP, 5720 "got an unknown interrupt! 
(status 0x%x)\n", status); 5721 5722 /* ack status block only if something was actually handled */ 5723 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5724 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5725 } 5726 5727 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5728 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5729 &bp->sp_state)) { 5730 bnx2x_link_report(bp); 5731 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5732 } 5733 } 5734 5735 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5736 { 5737 struct net_device *dev = dev_instance; 5738 struct bnx2x *bp = netdev_priv(dev); 5739 5740 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, 5741 IGU_INT_DISABLE, 0); 5742 5743 #ifdef BNX2X_STOP_ON_ERROR 5744 if (unlikely(bp->panic)) 5745 return IRQ_HANDLED; 5746 #endif 5747 5748 if (CNIC_LOADED(bp)) { 5749 struct cnic_ops *c_ops; 5750 5751 rcu_read_lock(); 5752 c_ops = rcu_dereference(bp->cnic_ops); 5753 if (c_ops) 5754 c_ops->cnic_handler(bp->cnic_data, NULL); 5755 rcu_read_unlock(); 5756 } 5757 5758 /* schedule sp task to perform default status block work, ack 5759 * attentions and enable interrupts. 5760 */ 5761 bnx2x_schedule_sp_task(bp); 5762 5763 return IRQ_HANDLED; 5764 } 5765 5766 /* end of slow path */ 5767 5768 void bnx2x_drv_pulse(struct bnx2x *bp) 5769 { 5770 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5771 bp->fw_drv_pulse_wr_seq); 5772 } 5773 5774 static void bnx2x_timer(struct timer_list *t) 5775 { 5776 struct bnx2x *bp = from_timer(bp, t, timer); 5777 5778 if (!netif_running(bp->dev)) 5779 return; 5780 5781 if (IS_PF(bp) && 5782 !BP_NOMCP(bp)) { 5783 int mb_idx = BP_FW_MB_IDX(bp); 5784 u16 drv_pulse; 5785 u16 mcp_pulse; 5786 5787 ++bp->fw_drv_pulse_wr_seq; 5788 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5789 drv_pulse = bp->fw_drv_pulse_wr_seq; 5790 bnx2x_drv_pulse(bp); 5791 5792 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5793 MCP_PULSE_SEQ_MASK); 5794 /* The delta between driver pulse and mcp response 5795 * should not get too big. If the MFW is more than 5 pulses 5796 * behind, we should worry about it enough to generate an error 5797 * log. 
	 */
		if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
			BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	/* sample pf vf bulletin board for new posts from pf */
	if (IS_VF(bp))
		bnx2x_timer_sriov(bp);

	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;
	if (!(len%4) && !(addr%4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);
}

/* helper: writes FP SP data to FW - data_size in dwords */
static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				int fw_sb_id,
				u32 *sb_data_p,
				u32 data_size)
{
	int index;
	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		       sizeof(u32)*index,
		       *(sb_data_p + index));
}

static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_SYNC_BLOCK_SIZE);
}

/* helper: writes SP SB data to FW */
static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
				struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		       i*sizeof(u32),
		       *((u32 *)sp_sb_data + i));
}

static void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.state = SB_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_SYNC_BLOCK_SIZE);
}

static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
					   int igu_sb_id, int
igu_seg_id) 5913 { 5914 hc_sm->igu_sb_id = igu_sb_id; 5915 hc_sm->igu_seg_id = igu_seg_id; 5916 hc_sm->timer_value = 0xFF; 5917 hc_sm->time_to_expire = 0xFFFFFFFF; 5918 } 5919 5920 /* allocates state machine ids. */ 5921 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 5922 { 5923 /* zero out state machine indices */ 5924 /* rx indices */ 5925 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5926 5927 /* tx indices */ 5928 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5929 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 5930 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 5931 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 5932 5933 /* map indices */ 5934 /* rx indices */ 5935 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 5936 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5937 5938 /* tx indices */ 5939 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 5940 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5941 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 5942 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5943 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 5944 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5945 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 5946 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5947 } 5948 5949 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 5950 u8 vf_valid, int fw_sb_id, int igu_sb_id) 5951 { 5952 int igu_seg_id; 5953 5954 struct hc_status_block_data_e2 sb_data_e2; 5955 struct hc_status_block_data_e1x sb_data_e1x; 5956 struct hc_status_block_sm *hc_sm_p; 5957 int data_size; 5958 u32 *sb_data_p; 5959 5960 if (CHIP_INT_MODE_IS_BC(bp)) 5961 igu_seg_id = HC_SEG_ACCESS_NORM; 5962 else 5963 igu_seg_id = IGU_SEG_ACCESS_NORM; 5964 5965 bnx2x_zero_fp_sb(bp, fw_sb_id); 5966 5967 if (!CHIP_IS_E1x(bp)) { 5968 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5969 sb_data_e2.common.state = SB_ENABLED; 5970 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); 5971 sb_data_e2.common.p_func.vf_id = vfid; 5972 sb_data_e2.common.p_func.vf_valid = vf_valid; 5973 sb_data_e2.common.p_func.vnic_id = BP_VN(bp); 5974 sb_data_e2.common.same_igu_sb_1b = true; 5975 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); 5976 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); 5977 hc_sm_p = sb_data_e2.common.state_machine; 5978 sb_data_p = (u32 *)&sb_data_e2; 5979 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5980 bnx2x_map_sb_state_machines(sb_data_e2.index_data); 5981 } else { 5982 memset(&sb_data_e1x, 0, 5983 sizeof(struct hc_status_block_data_e1x)); 5984 sb_data_e1x.common.state = SB_ENABLED; 5985 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); 5986 sb_data_e1x.common.p_func.vf_id = 0xff; 5987 sb_data_e1x.common.p_func.vf_valid = false; 5988 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp); 5989 sb_data_e1x.common.same_igu_sb_1b = true; 5990 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); 5991 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); 5992 hc_sm_p = sb_data_e1x.common.state_machine; 5993 sb_data_p = (u32 *)&sb_data_e1x; 5994 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5995 bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 5996 } 5997 5998 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 5999 igu_sb_id, igu_seg_id); 6000 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], 6001 igu_sb_id, igu_seg_id); 6002 6003 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); 6004 6005 /* write indices to HW - PCI 
guarantees endianity of regpairs */ 6006 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 6007 } 6008 6009 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, 6010 u16 tx_usec, u16 rx_usec) 6011 { 6012 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, 6013 false, rx_usec); 6014 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 6015 HC_INDEX_ETH_TX_CQ_CONS_COS0, false, 6016 tx_usec); 6017 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 6018 HC_INDEX_ETH_TX_CQ_CONS_COS1, false, 6019 tx_usec); 6020 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 6021 HC_INDEX_ETH_TX_CQ_CONS_COS2, false, 6022 tx_usec); 6023 } 6024 6025 static void bnx2x_init_def_sb(struct bnx2x *bp) 6026 { 6027 struct host_sp_status_block *def_sb = bp->def_status_blk; 6028 dma_addr_t mapping = bp->def_status_blk_mapping; 6029 int igu_sp_sb_index; 6030 int igu_seg_id; 6031 int port = BP_PORT(bp); 6032 int func = BP_FUNC(bp); 6033 int reg_offset, reg_offset_en5; 6034 u64 section; 6035 int index; 6036 struct hc_sp_status_block_data sp_sb_data; 6037 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 6038 6039 if (CHIP_INT_MODE_IS_BC(bp)) { 6040 igu_sp_sb_index = DEF_SB_IGU_ID; 6041 igu_seg_id = HC_SEG_ACCESS_DEF; 6042 } else { 6043 igu_sp_sb_index = bp->igu_dsb_id; 6044 igu_seg_id = IGU_SEG_ACCESS_DEF; 6045 } 6046 6047 /* ATTN */ 6048 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 6049 atten_status_block); 6050 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 6051 6052 bp->attn_state = 0; 6053 6054 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 6055 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 6056 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 6057 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); 6058 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 6059 int sindex; 6060 /* take care of sig[0]..sig[4] */ 6061 for (sindex = 0; sindex < 4; sindex++) 6062 bp->attn_group[index].sig[sindex] = 6063 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); 6064 6065 if (!CHIP_IS_E1x(bp)) 6066 /* 6067 * enable5 is separate from the rest of the registers, 6068 * and therefore the address skip is 4 6069 * and not 16 between the different groups 6070 */ 6071 bp->attn_group[index].sig[4] = REG_RD(bp, 6072 reg_offset_en5 + 0x4*index); 6073 else 6074 bp->attn_group[index].sig[4] = 0; 6075 } 6076 6077 if (bp->common.int_block == INT_BLOCK_HC) { 6078 reg_offset = (port ? 
HC_REG_ATTN_MSG1_ADDR_L : 6079 HC_REG_ATTN_MSG0_ADDR_L); 6080 6081 REG_WR(bp, reg_offset, U64_LO(section)); 6082 REG_WR(bp, reg_offset + 4, U64_HI(section)); 6083 } else if (!CHIP_IS_E1x(bp)) { 6084 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 6085 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 6086 } 6087 6088 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 6089 sp_sb); 6090 6091 bnx2x_zero_sp_sb(bp); 6092 6093 /* PCI guarantees endianity of regpairs */ 6094 sp_sb_data.state = SB_ENABLED; 6095 sp_sb_data.host_sb_addr.lo = U64_LO(section); 6096 sp_sb_data.host_sb_addr.hi = U64_HI(section); 6097 sp_sb_data.igu_sb_id = igu_sp_sb_index; 6098 sp_sb_data.igu_seg_id = igu_seg_id; 6099 sp_sb_data.p_func.pf_id = func; 6100 sp_sb_data.p_func.vnic_id = BP_VN(bp); 6101 sp_sb_data.p_func.vf_id = 0xff; 6102 6103 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 6104 6105 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 6106 } 6107 6108 void bnx2x_update_coalesce(struct bnx2x *bp) 6109 { 6110 int i; 6111 6112 for_each_eth_queue(bp, i) 6113 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, 6114 bp->tx_ticks, bp->rx_ticks); 6115 } 6116 6117 static void bnx2x_init_sp_ring(struct bnx2x *bp) 6118 { 6119 spin_lock_init(&bp->spq_lock); 6120 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); 6121 6122 bp->spq_prod_idx = 0; 6123 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 6124 bp->spq_prod_bd = bp->spq; 6125 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 6126 } 6127 6128 static void bnx2x_init_eq_ring(struct bnx2x *bp) 6129 { 6130 int i; 6131 for (i = 1; i <= NUM_EQ_PAGES; i++) { 6132 union event_ring_elem *elem = 6133 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; 6134 6135 elem->next_page.addr.hi = 6136 cpu_to_le32(U64_HI(bp->eq_mapping + 6137 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); 6138 elem->next_page.addr.lo = 6139 cpu_to_le32(U64_LO(bp->eq_mapping + 6140 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES))); 6141 } 6142 bp->eq_cons = 0; 6143 bp->eq_prod = NUM_EQ_DESC; 6144 bp->eq_cons_sb = BNX2X_EQ_INDEX; 6145 /* we want a warning message before it gets wrought... 
*/ 6146 atomic_set(&bp->eq_spq_left, 6147 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); 6148 } 6149 6150 /* called with netif_addr_lock_bh() */ 6151 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, 6152 unsigned long rx_mode_flags, 6153 unsigned long rx_accept_flags, 6154 unsigned long tx_accept_flags, 6155 unsigned long ramrod_flags) 6156 { 6157 struct bnx2x_rx_mode_ramrod_params ramrod_param; 6158 int rc; 6159 6160 memset(&ramrod_param, 0, sizeof(ramrod_param)); 6161 6162 /* Prepare ramrod parameters */ 6163 ramrod_param.cid = 0; 6164 ramrod_param.cl_id = cl_id; 6165 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; 6166 ramrod_param.func_id = BP_FUNC(bp); 6167 6168 ramrod_param.pstate = &bp->sp_state; 6169 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; 6170 6171 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); 6172 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); 6173 6174 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 6175 6176 ramrod_param.ramrod_flags = ramrod_flags; 6177 ramrod_param.rx_mode_flags = rx_mode_flags; 6178 6179 ramrod_param.rx_accept_flags = rx_accept_flags; 6180 ramrod_param.tx_accept_flags = tx_accept_flags; 6181 6182 rc = bnx2x_config_rx_mode(bp, &ramrod_param); 6183 if (rc < 0) { 6184 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); 6185 return rc; 6186 } 6187 6188 return 0; 6189 } 6190 6191 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, 6192 unsigned long *rx_accept_flags, 6193 unsigned long *tx_accept_flags) 6194 { 6195 /* Clear the flags first */ 6196 *rx_accept_flags = 0; 6197 *tx_accept_flags = 0; 6198 6199 switch (rx_mode) { 6200 case BNX2X_RX_MODE_NONE: 6201 /* 6202 * 'drop all' supersedes any accept flags that may have been 6203 * passed to the function. 6204 */ 6205 break; 6206 case BNX2X_RX_MODE_NORMAL: 6207 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6208 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags); 6209 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6210 6211 /* internal switching mode */ 6212 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6213 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); 6214 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6215 6216 if (bp->accept_any_vlan) { 6217 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6218 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6219 } 6220 6221 break; 6222 case BNX2X_RX_MODE_ALLMULTI: 6223 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6224 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 6225 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6226 6227 /* internal switching mode */ 6228 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6229 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 6230 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6231 6232 if (bp->accept_any_vlan) { 6233 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6234 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6235 } 6236 6237 break; 6238 case BNX2X_RX_MODE_PROMISC: 6239 /* According to definition of SI mode, iface in promisc mode 6240 * should receive matched and unmatched (in resolution of port) 6241 * unicast packets. 
6242 */ 6243 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags); 6244 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6245 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 6246 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6247 6248 /* internal switching mode */ 6249 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 6250 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6251 6252 if (IS_MF_SI(bp)) 6253 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags); 6254 else 6255 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6256 6257 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6258 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6259 6260 break; 6261 default: 6262 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); 6263 return -EINVAL; 6264 } 6265 6266 return 0; 6267 } 6268 6269 /* called with netif_addr_lock_bh() */ 6270 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp) 6271 { 6272 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 6273 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 6274 int rc; 6275 6276 if (!NO_FCOE(bp)) 6277 /* Configure rx_mode of FCoE Queue */ 6278 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); 6279 6280 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, 6281 &tx_accept_flags); 6282 if (rc) 6283 return rc; 6284 6285 __set_bit(RAMROD_RX, &ramrod_flags); 6286 __set_bit(RAMROD_TX, &ramrod_flags); 6287 6288 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, 6289 rx_accept_flags, tx_accept_flags, 6290 ramrod_flags); 6291 } 6292 6293 static void bnx2x_init_internal_common(struct bnx2x *bp) 6294 { 6295 int i; 6296 6297 /* Zero this manually as its initialization is 6298 currently missing in the initTool */ 6299 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 6300 REG_WR(bp, BAR_USTRORM_INTMEM + 6301 USTORM_AGG_DATA_OFFSET + i * 4, 0); 6302 if (!CHIP_IS_E1x(bp)) { 6303 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, 6304 CHIP_INT_MODE_IS_BC(bp) ? 
6305 HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 6306 } 6307 } 6308 6309 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 6310 { 6311 switch (load_code) { 6312 case FW_MSG_CODE_DRV_LOAD_COMMON: 6313 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 6314 bnx2x_init_internal_common(bp); 6315 fallthrough; 6316 6317 case FW_MSG_CODE_DRV_LOAD_PORT: 6318 /* nothing to do */ 6319 fallthrough; 6320 6321 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 6322 /* internal memory per function is 6323 initialized inside bnx2x_pf_init */ 6324 break; 6325 6326 default: 6327 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); 6328 break; 6329 } 6330 } 6331 6332 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 6333 { 6334 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); 6335 } 6336 6337 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 6338 { 6339 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); 6340 } 6341 6342 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 6343 { 6344 if (CHIP_IS_E1x(fp->bp)) 6345 return BP_L_ID(fp->bp) + fp->index; 6346 else /* We want Client ID to be the same as IGU SB ID for 57712 */ 6347 return bnx2x_fp_igu_sb_id(fp); 6348 } 6349 6350 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) 6351 { 6352 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; 6353 u8 cos; 6354 unsigned long q_type = 0; 6355 u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; 6356 fp->rx_queue = fp_idx; 6357 fp->cid = fp_idx; 6358 fp->cl_id = bnx2x_fp_cl_id(fp); 6359 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); 6360 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); 6361 /* qZone id equals to FW (per path) client id */ 6362 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); 6363 6364 /* init shortcut */ 6365 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); 6366 6367 /* Setup SB indices */ 6368 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 6369 6370 /* Configure Queue State object */ 6371 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 6372 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 6373 6374 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); 6375 6376 /* init tx data */ 6377 for_each_cos_in_tx_queue(fp, cos) { 6378 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], 6379 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), 6380 FP_COS_TO_TXQ(fp, cos, bp), 6381 BNX2X_TX_SB_INDEX_BASE + cos, fp); 6382 cids[cos] = fp->txdata_ptr[cos]->cid; 6383 } 6384 6385 /* nothing more for vf to do here */ 6386 if (IS_VF(bp)) 6387 return; 6388 6389 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, 6390 fp->fw_sb_id, fp->igu_sb_id); 6391 bnx2x_update_fpsb_idx(fp); 6392 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, 6393 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 6394 bnx2x_sp_mapping(bp, q_rdata), q_type); 6395 6396 /** 6397 * Configure classification DBs: Always enable Tx switching 6398 */ 6399 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); 6400 6401 DP(NETIF_MSG_IFUP, 6402 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", 6403 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 6404 fp->igu_sb_id); 6405 } 6406 6407 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) 6408 { 6409 int i; 6410 6411 for (i = 1; i <= NUM_TX_RINGS; i++) { 6412 struct eth_tx_next_bd *tx_next_bd = 6413 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; 6414 6415 tx_next_bd->addr_hi = 6416 cpu_to_le32(U64_HI(txdata->tx_desc_mapping + 6417 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 6418 tx_next_bd->addr_lo = 6419 cpu_to_le32(U64_LO(txdata->tx_desc_mapping + 6420 BCM_PAGE_SIZE*(i % 
NUM_TX_RINGS))); 6421 } 6422 6423 *txdata->tx_cons_sb = cpu_to_le16(0); 6424 6425 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 6426 txdata->tx_db.data.zero_fill1 = 0; 6427 txdata->tx_db.data.prod = 0; 6428 6429 txdata->tx_pkt_prod = 0; 6430 txdata->tx_pkt_cons = 0; 6431 txdata->tx_bd_prod = 0; 6432 txdata->tx_bd_cons = 0; 6433 txdata->tx_pkt = 0; 6434 } 6435 6436 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) 6437 { 6438 int i; 6439 6440 for_each_tx_queue_cnic(bp, i) 6441 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); 6442 } 6443 6444 static void bnx2x_init_tx_rings(struct bnx2x *bp) 6445 { 6446 int i; 6447 u8 cos; 6448 6449 for_each_eth_queue(bp, i) 6450 for_each_cos_in_tx_queue(&bp->fp[i], cos) 6451 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 6452 } 6453 6454 static void bnx2x_init_fcoe_fp(struct bnx2x *bp) 6455 { 6456 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 6457 unsigned long q_type = 0; 6458 6459 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); 6460 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, 6461 BNX2X_FCOE_ETH_CL_ID_IDX); 6462 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); 6463 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; 6464 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; 6465 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; 6466 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), 6467 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, 6468 fp); 6469 6470 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); 6471 6472 /* qZone id equals to FW (per path) client id */ 6473 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); 6474 /* init shortcut */ 6475 bnx2x_fcoe(bp, ustorm_rx_prods_offset) = 6476 bnx2x_rx_ustorm_prods_offset(fp); 6477 6478 /* Configure Queue State object */ 6479 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 6480 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 6481 6482 /* No multi-CoS for FCoE L2 client */ 6483 BUG_ON(fp->max_cos != 1); 6484 6485 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, 6486 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 6487 bnx2x_sp_mapping(bp, q_rdata), q_type); 6488 6489 DP(NETIF_MSG_IFUP, 6490 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", 6491 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 6492 fp->igu_sb_id); 6493 } 6494 6495 void bnx2x_nic_init_cnic(struct bnx2x *bp) 6496 { 6497 if (!NO_FCOE(bp)) 6498 bnx2x_init_fcoe_fp(bp); 6499 6500 bnx2x_init_sb(bp, bp->cnic_sb_mapping, 6501 BNX2X_VF_ID_INVALID, false, 6502 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 6503 6504 /* ensure status block indices were read */ 6505 rmb(); 6506 bnx2x_init_rx_rings_cnic(bp); 6507 bnx2x_init_tx_rings_cnic(bp); 6508 6509 /* flush all */ 6510 mb(); 6511 } 6512 6513 void bnx2x_pre_irq_nic_init(struct bnx2x *bp) 6514 { 6515 int i; 6516 6517 /* Setup NIC internals and enable interrupts */ 6518 for_each_eth_queue(bp, i) 6519 bnx2x_init_eth_fp(bp, i); 6520 6521 /* ensure status block indices were read */ 6522 rmb(); 6523 bnx2x_init_rx_rings(bp); 6524 bnx2x_init_tx_rings(bp); 6525 6526 if (IS_PF(bp)) { 6527 /* Initialize MOD_ABS interrupts */ 6528 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 6529 bp->common.shmem_base, 6530 bp->common.shmem2_base, BP_PORT(bp)); 6531 6532 /* initialize the default status block and sp ring */ 6533 bnx2x_init_def_sb(bp); 6534 bnx2x_update_dsb_idx(bp); 6535 bnx2x_init_sp_ring(bp); 6536 } else { 6537 bnx2x_memset_stats(bp); 6538 } 6539 } 6540 6541 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 
load_code) 6542 { 6543 bnx2x_init_eq_ring(bp); 6544 bnx2x_init_internal(bp, load_code); 6545 bnx2x_pf_init(bp); 6546 bnx2x_stats_init(bp); 6547 6548 /* flush all before enabling interrupts */ 6549 mb(); 6550 6551 bnx2x_int_enable(bp); 6552 6553 /* Check for SPIO5 */ 6554 bnx2x_attn_int_deasserted0(bp, 6555 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & 6556 AEU_INPUTS_ATTN_BITS_SPIO5); 6557 } 6558 6559 /* gzip service functions */ 6560 static int bnx2x_gunzip_init(struct bnx2x *bp) 6561 { 6562 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, 6563 &bp->gunzip_mapping, GFP_KERNEL); 6564 if (bp->gunzip_buf == NULL) 6565 goto gunzip_nomem1; 6566 6567 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); 6568 if (bp->strm == NULL) 6569 goto gunzip_nomem2; 6570 6571 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); 6572 if (bp->strm->workspace == NULL) 6573 goto gunzip_nomem3; 6574 6575 return 0; 6576 6577 gunzip_nomem3: 6578 kfree(bp->strm); 6579 bp->strm = NULL; 6580 6581 gunzip_nomem2: 6582 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6583 bp->gunzip_mapping); 6584 bp->gunzip_buf = NULL; 6585 6586 gunzip_nomem1: 6587 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); 6588 return -ENOMEM; 6589 } 6590 6591 static void bnx2x_gunzip_end(struct bnx2x *bp) 6592 { 6593 if (bp->strm) { 6594 vfree(bp->strm->workspace); 6595 kfree(bp->strm); 6596 bp->strm = NULL; 6597 } 6598 6599 if (bp->gunzip_buf) { 6600 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6601 bp->gunzip_mapping); 6602 bp->gunzip_buf = NULL; 6603 } 6604 } 6605 6606 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) 6607 { 6608 int n, rc; 6609 6610 /* check gzip header */ 6611 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { 6612 BNX2X_ERR("Bad gzip header\n"); 6613 return -EINVAL; 6614 } 6615 6616 n = 10; 6617 6618 #define FNAME 0x8 6619 6620 if (zbuf[3] & FNAME) 6621 while ((zbuf[n++] != 0) && (n < len)); 6622 6623 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; 6624 bp->strm->avail_in = len - n; 6625 bp->strm->next_out = bp->gunzip_buf; 6626 bp->strm->avail_out = FW_BUF_SIZE; 6627 6628 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); 6629 if (rc != Z_OK) 6630 return rc; 6631 6632 rc = zlib_inflate(bp->strm, Z_FINISH); 6633 if ((rc != Z_OK) && (rc != Z_STREAM_END)) 6634 netdev_err(bp->dev, "Firmware decompression error: %s\n", 6635 bp->strm->msg); 6636 6637 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6638 if (bp->gunzip_outlen & 0x3) 6639 netdev_err(bp->dev, 6640 "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6641 bp->gunzip_outlen); 6642 bp->gunzip_outlen >>= 2; 6643 6644 zlib_inflateEnd(bp->strm); 6645 6646 if (rc == Z_STREAM_END) 6647 return 0; 6648 6649 return rc; 6650 } 6651 6652 /* nic load/unload */ 6653 6654 /* 6655 * General service functions 6656 */ 6657 6658 /* send a NIG loopback debug packet */ 6659 static void bnx2x_lb_pckt(struct bnx2x *bp) 6660 { 6661 u32 wb_write[3]; 6662 6663 /* Ethernet source and destination addresses */ 6664 wb_write[0] = 0x55555555; 6665 wb_write[1] = 0x55555555; 6666 wb_write[2] = 0x20; /* SOP */ 6667 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6668 6669 /* NON-IP protocol */ 6670 wb_write[0] = 0x09000000; 6671 wb_write[1] = 0x55555555; 6672 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 6673 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6674 } 6675 6676 /* some of the internal memories 6677 * are not directly 
readable from the driver 6678 * to test them we send debug packets 6679 */ 6680 static int bnx2x_int_mem_test(struct bnx2x *bp) 6681 { 6682 int factor; 6683 int count, i; 6684 u32 val = 0; 6685 6686 if (CHIP_REV_IS_FPGA(bp)) 6687 factor = 120; 6688 else if (CHIP_REV_IS_EMUL(bp)) 6689 factor = 200; 6690 else 6691 factor = 1; 6692 6693 /* Disable inputs of parser neighbor blocks */ 6694 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6695 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6696 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6697 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6698 6699 /* Write 0 to parser credits for CFC search request */ 6700 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6701 6702 /* send Ethernet packet */ 6703 bnx2x_lb_pckt(bp); 6704 6705 /* TODO do i reset NIG statistic? */ 6706 /* Wait until NIG register shows 1 packet of size 0x10 */ 6707 count = 1000 * factor; 6708 while (count) { 6709 6710 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6711 val = *bnx2x_sp(bp, wb_data[0]); 6712 if (val == 0x10) 6713 break; 6714 6715 usleep_range(10000, 20000); 6716 count--; 6717 } 6718 if (val != 0x10) { 6719 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6720 return -1; 6721 } 6722 6723 /* Wait until PRS register shows 1 packet */ 6724 count = 1000 * factor; 6725 while (count) { 6726 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6727 if (val == 1) 6728 break; 6729 6730 usleep_range(10000, 20000); 6731 count--; 6732 } 6733 if (val != 0x1) { 6734 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6735 return -2; 6736 } 6737 6738 /* Reset and init BRB, PRS */ 6739 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6740 msleep(50); 6741 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6742 msleep(50); 6743 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6744 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6745 6746 DP(NETIF_MSG_HW, "part2\n"); 6747 6748 /* Disable inputs of parser neighbor blocks */ 6749 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6750 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6751 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6752 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6753 6754 /* Write 0 to parser credits for CFC search request */ 6755 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6756 6757 /* send 10 Ethernet packets */ 6758 for (i = 0; i < 10; i++) 6759 bnx2x_lb_pckt(bp); 6760 6761 /* Wait until NIG register shows 10 + 1 6762 packets of size 11*0x10 = 0xb0 */ 6763 count = 1000 * factor; 6764 while (count) { 6765 6766 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6767 val = *bnx2x_sp(bp, wb_data[0]); 6768 if (val == 0xb0) 6769 break; 6770 6771 usleep_range(10000, 20000); 6772 count--; 6773 } 6774 if (val != 0xb0) { 6775 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6776 return -3; 6777 } 6778 6779 /* Wait until PRS register shows 2 packets */ 6780 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6781 if (val != 2) 6782 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6783 6784 /* Write 1 to parser credits for CFC search request */ 6785 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 6786 6787 /* Wait until PRS register shows 3 packets */ 6788 msleep(10 * factor); 6789 /* Wait until NIG register shows 1 packet of size 0x10 */ 6790 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6791 if (val != 3) 6792 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6793 6794 /* clear NIG EOP FIFO */ 6795 for (i = 0; i < 11; i++) 6796 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); 6797 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); 6798 if (val != 1) { 6799 BNX2X_ERR("clear of NIG failed\n"); 6800 return -4; 6801 } 6802 6803 /* 
Reset and init BRB, PRS, NIG */ 6804 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6805 msleep(50); 6806 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6807 msleep(50); 6808 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6809 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6810 if (!CNIC_SUPPORT(bp)) 6811 /* set NIC mode */ 6812 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6813 6814 /* Enable inputs of parser neighbor blocks */ 6815 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6816 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 6817 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 6818 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); 6819 6820 DP(NETIF_MSG_HW, "done\n"); 6821 6822 return 0; /* OK */ 6823 } 6824 6825 static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 6826 { 6827 u32 val; 6828 6829 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6830 if (!CHIP_IS_E1x(bp)) 6831 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6832 else 6833 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 6834 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6835 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6836 /* 6837 * mask read length error interrupts in brb for parser 6838 * (parsing unit and 'checksum and crc' unit) 6839 * these errors are legal (PU reads fixed length and CAC can cause 6840 * read length error on truncated packets) 6841 */ 6842 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); 6843 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 6844 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 6845 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 6846 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); 6847 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); 6848 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ 6849 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ 6850 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); 6851 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); 6852 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); 6853 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ 6854 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ 6855 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 6856 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); 6857 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); 6858 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 6859 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6860 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6861 6862 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 6863 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 6864 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN; 6865 if (!CHIP_IS_E1x(bp)) 6866 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 6867 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED; 6868 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); 6869 6870 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6871 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6872 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6873 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ 6874 6875 if (!CHIP_IS_E1x(bp)) 6876 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 6877 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 6878 6879 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 6880 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 6881 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 6882 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 6883 } 6884 6885 static void bnx2x_reset_common(struct bnx2x *bp) 6886 { 6887 u32 val = 0x1400; 6888 6889 /* reset_common */ 6890 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6891 0xd3ffff7f); 6892 6893 if (CHIP_IS_E3(bp)) { 6894 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6895 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6896 } 6897 6898 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); 6899 } 6900 6901 static void bnx2x_setup_dmae(struct bnx2x *bp) 
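/* Note: this only marks DMAE as not ready and initializes the DMAE lock;
 * bp->dmae_ready is set to 1 later in the common HW init path, after the
 * DMAE block itself has been initialized. Until then, register accesses
 * presumably fall back to the slower non-DMAE (indirect) path.
 */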
6902 { 6903 bp->dmae_ready = 0; 6904 spin_lock_init(&bp->dmae_lock); 6905 } 6906 6907 static void bnx2x_init_pxp(struct bnx2x *bp) 6908 { 6909 u16 devctl; 6910 int r_order, w_order; 6911 6912 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); 6913 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6914 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6915 if (bp->mrrs == -1) 6916 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); 6917 else { 6918 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); 6919 r_order = bp->mrrs; 6920 } 6921 6922 bnx2x_init_pxp_arb(bp, r_order, w_order); 6923 } 6924 6925 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6926 { 6927 int is_required; 6928 u32 val; 6929 int port; 6930 6931 if (BP_NOMCP(bp)) 6932 return; 6933 6934 is_required = 0; 6935 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6936 SHARED_HW_CFG_FAN_FAILURE_MASK; 6937 6938 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) 6939 is_required = 1; 6940 6941 /* 6942 * The fan failure mechanism is usually related to the PHY type since 6943 * the power consumption of the board is affected by the PHY. Currently, 6944 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 6945 */ 6946 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 6947 for (port = PORT_0; port < PORT_MAX; port++) { 6948 is_required |= 6949 bnx2x_fan_failure_det_req( 6950 bp, 6951 bp->common.shmem_base, 6952 bp->common.shmem2_base, 6953 port); 6954 } 6955 6956 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 6957 6958 if (is_required == 0) 6959 return; 6960 6961 /* Fan failure is indicated by SPIO 5 */ 6962 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 6963 6964 /* set to active low mode */ 6965 val = REG_RD(bp, MISC_REG_SPIO_INT); 6966 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 6967 REG_WR(bp, MISC_REG_SPIO_INT, val); 6968 6969 /* enable interrupt to signal the IGU */ 6970 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6971 val |= MISC_SPIO_SPIO5; 6972 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 6973 } 6974 6975 void bnx2x_pf_disable(struct bnx2x *bp) 6976 { 6977 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 6978 val &= ~IGU_PF_CONF_FUNC_EN; 6979 6980 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 6981 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6982 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6983 } 6984 6985 static void bnx2x__common_init_phy(struct bnx2x *bp) 6986 { 6987 u32 shmem_base[2], shmem2_base[2]; 6988 /* Avoid common init in case MFW supports LFA */ 6989 if (SHMEM2_RD(bp, size) > 6990 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 6991 return; 6992 shmem_base[0] = bp->common.shmem_base; 6993 shmem2_base[0] = bp->common.shmem2_base; 6994 if (!CHIP_IS_E1x(bp)) { 6995 shmem_base[1] = 6996 SHMEM2_RD(bp, other_shmem_base_addr); 6997 shmem2_base[1] = 6998 SHMEM2_RD(bp, other_shmem2_base_addr); 6999 } 7000 bnx2x_acquire_phy_lock(bp); 7001 bnx2x_common_init_phy(bp, shmem_base, shmem2_base, 7002 bp->common.chip_id); 7003 bnx2x_release_phy_lock(bp); 7004 } 7005 7006 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) 7007 { 7008 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); 7009 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); 7010 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); 7011 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); 7012 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); 7013 7014 /* make sure this value is 0 */ 7015 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); 7016 7017 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); 7018 REG_WR(bp, 
PXP2_REG_RD_TM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
}

static void bnx2x_set_endianity(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
	bnx2x_config_endianity(bp, 1);
#else
	bnx2x_config_endianity(bp, 0);
#endif
}

static void bnx2x_reset_endianity(struct bnx2x *bp)
{
	bnx2x_config_endianity(bp, 0);
}

/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));

	/* take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/* In 4-port mode or 2-port mode we need to turn off the
		 * master-enable for everyone; after that, turn it back on
		 * for self. So, we disregard multi-function or not, and
		 * always disable for all functions on the given path, which
		 * means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
		 */
		for (abs_func_id = BP_PATH(bp);
		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		 * bit 16 on INT_MASK_0
		 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);
	bnx2x_set_endianity(bp);
	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (!CHIP_IS_E1x(bp)) {
		/* In E2 there is a bug in the timers block that can cause
		 * function 6 / 7 (i.e. vnic3) to start even if it is marked
		 * as "scan-off".
		 * This occurs when a different function (func2,3) is being
		 * marked as "scan-off". Real-life scenario for example: if a
		 * driver is being load-unloaded while func6,7 are down. This
		 * will cause the timer to access the ilt, translate to a
		 * logical address and send a request to read/write. Since the
		 * ilt for the function that is down is not valid, this will
		 * cause a translation error which is unrecoverable.
		 * The workaround is intended to make sure that when this
		 * happens nothing fatal will occur. The workaround:
		 *	1. First PF driver which loads on a path will:
		 *		a. After taking the chip out of reset, by using
		 *		   pretend, it will write "0" to the following
		 *		   registers of the other vnics.
		 *		   REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
		 *		   REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0);
		 *		   REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0);
		 *		   And for itself it will write '1' to
		 *		   PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to
		 *		   enable dmae-operations (writing to pram for
		 *		   example).
		 *		   Note: can be done for only function 6,7 but
		 *		   cleaner this way.
		 *		b. Write zero+valid to the entire ILT.
		 *		c. Init the first_timers_ilt_entry and
		 *		   last_timers_ilt_entry of VNIC3 (of that
		 *		   port). The range allocated will be the
		 *		   entire ILT. This is needed to prevent ILT
		 *		   range error.
		 *	2. Any PF driver load flow:
		 *		a. ILT update with the physical addresses of
		 *		   the allocated logical pages.
		 *		b. Wait 20msec. - note that this timeout is
		 *		   needed to make sure there are no requests in
		 *		   one of the PXP internal queues with "old"
		 *		   ILT addresses.
		 *		c. PF enable in the PGLC.
		 *		d. Clear the was_error of the PF in the PGLC.
		 *		   (could have occurred while driver was down)
		 *		e. PF enable in the CFC (WEAK + STRONG)
		 *		f. Timers scan enable
		 *	3. PF driver unload flow:
		 *		a. Clear the Timers scan_en.
		 *		b. Polling for scan_on=0 for that PF.
		 *		c. Clear the PF enable bit in the PXP.
		 *		d. Clear the PF enable in the CFC (WEAK + STRONG)
		 *		e. Write zero+valid to all ILT entries (the
		 *		   valid bit must stay set)
		 *		f. If this is VNIC 3 of a port then also init
		 *		   first_timers_ilt_entry to zero and
		 *		   last_timers_ilt_entry to the last entry in
		 *		   the ILT.
		 *
		 * Notes:
		 * Currently the PF error in the PGLC is non-recoverable.
		 * In the future there will be a recovery routine for this
		 * error. Currently attention is masked.
		 * Having an MCP lock on the load/unload process does not
		 * guarantee that there is no Timer disable during Func6/7
		 * enable. This is because the Timers scan is currently being
		 * cleared by the MCP on FLR.
		 * Step 2.d can be done only for PF6/7 and the driver can also
		 * check if there is error before clearing it. But the flow
		 * above is simpler and more general.
		 * All ILT entries are written by zero+valid and not just
		 * PF6/7 ILT entries, since in the future the ILT entries
		 * allocation for PF-s might be dynamic.
7190 */ 7191 struct ilt_client_info ilt_cli; 7192 struct bnx2x_ilt ilt; 7193 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 7194 memset(&ilt, 0, sizeof(struct bnx2x_ilt)); 7195 7196 /* initialize dummy TM client */ 7197 ilt_cli.start = 0; 7198 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 7199 ilt_cli.client_num = ILT_CLIENT_TM; 7200 7201 /* Step 1: set zeroes to all ilt page entries with valid bit on 7202 * Step 2: set the timers first/last ilt entry to point 7203 * to the entire range to prevent ILT range error for 3rd/4th 7204 * vnic (this code assumes existence of the vnic) 7205 * 7206 * both steps performed by call to bnx2x_ilt_client_init_op() 7207 * with dummy TM client 7208 * 7209 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 7210 * and his brother are split registers 7211 */ 7212 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); 7213 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); 7214 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 7215 7216 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); 7217 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); 7218 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 7219 } 7220 7221 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 7222 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 7223 7224 if (!CHIP_IS_E1x(bp)) { 7225 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : 7226 (CHIP_REV_IS_FPGA(bp) ? 400 : 0); 7227 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); 7228 7229 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); 7230 7231 /* let the HW do it's magic ... */ 7232 do { 7233 msleep(200); 7234 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); 7235 } while (factor-- && (val != 1)); 7236 7237 if (val != 1) { 7238 BNX2X_ERR("ATC_INIT failed\n"); 7239 return -EBUSY; 7240 } 7241 } 7242 7243 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); 7244 7245 bnx2x_iov_init_dmae(bp); 7246 7247 /* clean the DMAE memory */ 7248 bp->dmae_ready = 1; 7249 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); 7250 7251 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); 7252 7253 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); 7254 7255 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); 7256 7257 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); 7258 7259 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); 7260 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); 7261 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); 7262 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 7263 7264 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); 7265 7266 /* QM queues pointers table */ 7267 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 7268 7269 /* soft reset pulse */ 7270 REG_WR(bp, QM_REG_SOFT_RESET, 1); 7271 REG_WR(bp, QM_REG_SOFT_RESET, 0); 7272 7273 if (CNIC_SUPPORT(bp)) 7274 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 7275 7276 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 7277 7278 if (!CHIP_REV_IS_SLOW(bp)) 7279 /* enable hw interrupt from doorbell Q */ 7280 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 7281 7282 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 7283 7284 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 7285 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 7286 7287 if (!CHIP_IS_E1(bp)) 7288 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 7289 7290 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { 7291 if (IS_MF_AFEX(bp)) { 7292 /* configure that VNTag and VLAN headers must be 7293 * received in afex mode 7294 */ 7295 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); 7296 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); 7297 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 7298 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 
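			/* 0x8926 is the VN-Tag EtherType used for AFEX/NIV
			 * traffic; the 0x4 written to TAG_LEN_0 below
			 * presumably covers the four tag bytes that follow
			 * that EtherType.
			 */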
7299 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); 7300 } else { 7301 /* Bit-map indicating which L2 hdrs may appear 7302 * after the basic Ethernet header 7303 */ 7304 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 7305 bp->path_has_ovlan ? 7 : 6); 7306 } 7307 } 7308 7309 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 7310 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 7311 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); 7312 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); 7313 7314 if (!CHIP_IS_E1x(bp)) { 7315 /* reset VFC memories */ 7316 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7317 VFC_MEMORIES_RST_REG_CAM_RST | 7318 VFC_MEMORIES_RST_REG_RAM_RST); 7319 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7320 VFC_MEMORIES_RST_REG_CAM_RST | 7321 VFC_MEMORIES_RST_REG_RAM_RST); 7322 7323 msleep(20); 7324 } 7325 7326 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); 7327 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); 7328 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); 7329 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); 7330 7331 /* sync semi rtc */ 7332 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 7333 0x80000000); 7334 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 7335 0x80000000); 7336 7337 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); 7338 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 7339 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 7340 7341 if (!CHIP_IS_E1x(bp)) { 7342 if (IS_MF_AFEX(bp)) { 7343 /* configure that VNTag and VLAN headers must be 7344 * sent in afex mode 7345 */ 7346 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); 7347 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); 7348 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 7349 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 7350 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); 7351 } else { 7352 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 7353 bp->path_has_ovlan ? 
7 : 6); 7354 } 7355 } 7356 7357 REG_WR(bp, SRC_REG_SOFT_RST, 1); 7358 7359 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 7360 7361 if (CNIC_SUPPORT(bp)) { 7362 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 7363 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 7364 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 7365 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 7366 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 7367 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 7368 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 7369 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 7370 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 7371 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 7372 } 7373 REG_WR(bp, SRC_REG_SOFT_RST, 0); 7374 7375 if (sizeof(union cdu_context) != 1024) 7376 /* we currently assume that a context is 1024 bytes */ 7377 dev_alert(&bp->pdev->dev, 7378 "please adjust the size of cdu_context(%ld)\n", 7379 (long)sizeof(union cdu_context)); 7380 7381 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); 7382 val = (4 << 24) + (0 << 12) + 1024; 7383 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 7384 7385 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); 7386 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 7387 /* enable context validation interrupt from CFC */ 7388 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 7389 7390 /* set the thresholds to prevent CFC/CDU race */ 7391 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 7392 7393 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); 7394 7395 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) 7396 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); 7397 7398 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); 7399 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); 7400 7401 /* Reset PCIE errors for debug */ 7402 REG_WR(bp, 0x2814, 0xffffffff); 7403 REG_WR(bp, 0x3820, 0xffffffff); 7404 7405 if (!CHIP_IS_E1x(bp)) { 7406 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 7407 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 7408 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 7409 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 7410 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 7411 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 7412 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 7413 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 7414 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 7415 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 7416 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 7417 } 7418 7419 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); 7420 if (!CHIP_IS_E1(bp)) { 7421 /* in E3 this done in per-port section */ 7422 if (!CHIP_IS_E3(bp)) 7423 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7424 } 7425 if (CHIP_IS_E1H(bp)) 7426 /* not applicable for E2 (and above ...) 
*/ 7427 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); 7428 7429 if (CHIP_REV_IS_SLOW(bp)) 7430 msleep(200); 7431 7432 /* finish CFC init */ 7433 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); 7434 if (val != 1) { 7435 BNX2X_ERR("CFC LL_INIT failed\n"); 7436 return -EBUSY; 7437 } 7438 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); 7439 if (val != 1) { 7440 BNX2X_ERR("CFC AC_INIT failed\n"); 7441 return -EBUSY; 7442 } 7443 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 7444 if (val != 1) { 7445 BNX2X_ERR("CFC CAM_INIT failed\n"); 7446 return -EBUSY; 7447 } 7448 REG_WR(bp, CFC_REG_DEBUG0, 0); 7449 7450 if (CHIP_IS_E1(bp)) { 7451 /* read NIG statistic 7452 to see if this is our first up since powerup */ 7453 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 7454 val = *bnx2x_sp(bp, wb_data[0]); 7455 7456 /* do internal memory self test */ 7457 if ((val == 0) && bnx2x_int_mem_test(bp)) { 7458 BNX2X_ERR("internal mem self test failed\n"); 7459 return -EBUSY; 7460 } 7461 } 7462 7463 bnx2x_setup_fan_failure_detection(bp); 7464 7465 /* clear PXP2 attentions */ 7466 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 7467 7468 bnx2x_enable_blocks_attention(bp); 7469 bnx2x_enable_blocks_parity(bp); 7470 7471 if (!BP_NOMCP(bp)) { 7472 if (CHIP_IS_E1x(bp)) 7473 bnx2x__common_init_phy(bp); 7474 } else 7475 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 7476 7477 if (SHMEM2_HAS(bp, netproc_fw_ver)) 7478 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM)); 7479 7480 return 0; 7481 } 7482 7483 /** 7484 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 7485 * 7486 * @bp: driver handle 7487 */ 7488 static int bnx2x_init_hw_common_chip(struct bnx2x *bp) 7489 { 7490 int rc = bnx2x_init_hw_common(bp); 7491 7492 if (rc) 7493 return rc; 7494 7495 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 7496 if (!BP_NOMCP(bp)) 7497 bnx2x__common_init_phy(bp); 7498 7499 return 0; 7500 } 7501 7502 static int bnx2x_init_hw_port(struct bnx2x *bp) 7503 { 7504 int port = BP_PORT(bp); 7505 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7506 u32 low, high; 7507 u32 val, reg; 7508 7509 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7510 7511 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 7512 7513 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7514 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7515 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7516 7517 /* Timers bug workaround: disables the pf_master bit in pglue at 7518 * common phase, we need to enable it here before any dmae access are 7519 * attempted. 
Therefore we manually added the enable-master to the 7520 * port phase (it also happens in the function phase) 7521 */ 7522 if (!CHIP_IS_E1x(bp)) 7523 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7524 7525 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7526 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7527 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7528 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7529 7530 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7531 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7532 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7533 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7534 7535 /* QM cid (connection) count */ 7536 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 7537 7538 if (CNIC_SUPPORT(bp)) { 7539 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7540 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 7541 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 7542 } 7543 7544 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7545 7546 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7547 7548 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 7549 7550 if (IS_MF(bp)) 7551 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 7552 else if (bp->dev->mtu > 4096) { 7553 if (bp->flags & ONE_PORT_FLAG) 7554 low = 160; 7555 else { 7556 val = bp->dev->mtu; 7557 /* (24*1024 + val*4)/256 */ 7558 low = 96 + (val/64) + 7559 ((val % 64) ? 1 : 0); 7560 } 7561 } else 7562 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 7563 high = low + 56; /* 14*1024/256 */ 7564 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 7565 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 7566 } 7567 7568 if (CHIP_MODE_IS_4_PORT(bp)) 7569 REG_WR(bp, (BP_PORT(bp) ? 7570 BRB1_REG_MAC_GUARANTIED_1 : 7571 BRB1_REG_MAC_GUARANTIED_0), 40); 7572 7573 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7574 if (CHIP_IS_E3B0(bp)) { 7575 if (IS_MF_AFEX(bp)) { 7576 /* configure headers for AFEX mode */ 7577 REG_WR(bp, BP_PORT(bp) ? 7578 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7579 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 7580 REG_WR(bp, BP_PORT(bp) ? 7581 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 7582 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 7583 REG_WR(bp, BP_PORT(bp) ? 7584 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 7585 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 7586 } else { 7587 /* Ovlan exists only if we are in multi-function + 7588 * switch-dependent mode, in switch-independent there 7589 * is no ovlan headers 7590 */ 7591 REG_WR(bp, BP_PORT(bp) ? 7592 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7593 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 7594 (bp->path_has_ovlan ? 
7 : 6)); 7595 } 7596 } 7597 7598 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7599 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7600 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7601 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7602 7603 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7604 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7605 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7606 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7607 7608 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7609 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7610 7611 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7612 7613 if (CHIP_IS_E1x(bp)) { 7614 /* configure PBF to work without PAUSE mtu 9000 */ 7615 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 7616 7617 /* update threshold */ 7618 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 7619 /* update init credit */ 7620 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 7621 7622 /* probe changes */ 7623 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 7624 udelay(50); 7625 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 7626 } 7627 7628 if (CNIC_SUPPORT(bp)) 7629 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7630 7631 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7632 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7633 7634 if (CHIP_IS_E1(bp)) { 7635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7636 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7637 } 7638 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7639 7640 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7641 7642 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7643 /* init aeu_mask_attn_func_0/1: 7644 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use 7645 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF 7646 * bits 4-7 are used for "per vn group attention" */ 7647 val = IS_MF(bp) ? 0xF7 : 0x7; 7648 /* Enable DCBX attention for all but E1 */ 7649 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7650 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7651 7652 /* SCPAD_PARITY should NOT trigger close the gates */ 7653 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; 7654 REG_WR(bp, reg, 7655 REG_RD(bp, reg) & 7656 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7657 7658 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; 7659 REG_WR(bp, reg, 7660 REG_RD(bp, reg) & 7661 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7662 7663 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7664 7665 if (!CHIP_IS_E1x(bp)) { 7666 /* Bit-map indicating which L2 hdrs may appear after the 7667 * basic Ethernet header 7668 */ 7669 if (IS_MF_AFEX(bp)) 7670 REG_WR(bp, BP_PORT(bp) ? 7671 NIG_REG_P1_HDRS_AFTER_BASIC : 7672 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 7673 else 7674 REG_WR(bp, BP_PORT(bp) ? 7675 NIG_REG_P1_HDRS_AFTER_BASIC : 7676 NIG_REG_P0_HDRS_AFTER_BASIC, 7677 IS_MF_SD(bp) ? 7 : 6); 7678 7679 if (CHIP_IS_E3(bp)) 7680 REG_WR(bp, BP_PORT(bp) ? 7681 NIG_REG_LLH1_MF_MODE : 7682 NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7683 } 7684 if (!CHIP_IS_E3(bp)) 7685 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 7686 7687 if (!CHIP_IS_E1(bp)) { 7688 /* 0x2 disable mf_ov, 0x1 enable */ 7689 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 7690 (IS_MF_SD(bp) ? 0x1 : 0x2)); 7691 7692 if (!CHIP_IS_E1x(bp)) { 7693 val = 0; 7694 switch (bp->mf_mode) { 7695 case MULTI_FUNCTION_SD: 7696 val = 1; 7697 break; 7698 case MULTI_FUNCTION_SI: 7699 case MULTI_FUNCTION_AFEX: 7700 val = 2; 7701 break; 7702 } 7703 7704 REG_WR(bp, (BP_PORT(bp) ? 
NIG_REG_LLH1_CLS_TYPE : 7705 NIG_REG_LLH0_CLS_TYPE), val); 7706 } 7707 { 7708 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 7709 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 7710 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 7711 } 7712 } 7713 7714 /* If SPIO5 is set to generate interrupts, enable it for this port */ 7715 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 7716 if (val & MISC_SPIO_SPIO5) { 7717 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 7718 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 7719 val = REG_RD(bp, reg_addr); 7720 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 7721 REG_WR(bp, reg_addr, val); 7722 } 7723 7724 if (CHIP_IS_E3B0(bp)) 7725 bp->flags |= PTP_SUPPORTED; 7726 7727 return 0; 7728 } 7729 7730 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 7731 { 7732 int reg; 7733 u32 wb_write[2]; 7734 7735 if (CHIP_IS_E1(bp)) 7736 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 7737 else 7738 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 7739 7740 wb_write[0] = ONCHIP_ADDR1(addr); 7741 wb_write[1] = ONCHIP_ADDR2(addr); 7742 REG_WR_DMAE(bp, reg, wb_write, 2); 7743 } 7744 7745 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) 7746 { 7747 u32 data, ctl, cnt = 100; 7748 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 7749 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 7750 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 7751 u32 sb_bit = 1 << (idu_sb_id%32); 7752 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 7753 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 7754 7755 /* Not supported in BC mode */ 7756 if (CHIP_INT_MODE_IS_BC(bp)) 7757 return; 7758 7759 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 7760 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 7761 IGU_REGULAR_CLEANUP_SET | 7762 IGU_REGULAR_BCLEANUP; 7763 7764 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 7765 func_encode << IGU_CTRL_REG_FID_SHIFT | 7766 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 7767 7768 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7769 data, igu_addr_data); 7770 REG_WR(bp, igu_addr_data, data); 7771 barrier(); 7772 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7773 ctl, igu_addr_ctl); 7774 REG_WR(bp, igu_addr_ctl, ctl); 7775 barrier(); 7776 7777 /* wait for clean up to finish */ 7778 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 7779 msleep(20); 7780 7781 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 7782 DP(NETIF_MSG_HW, 7783 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", 7784 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 7785 } 7786 } 7787 7788 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 7789 { 7790 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7791 } 7792 7793 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7794 { 7795 u32 i, base = FUNC_ILT_BASE(func); 7796 for (i = base; i < base + ILT_PER_FUNC; i++) 7797 bnx2x_ilt_wr(bp, i, 0); 7798 } 7799 7800 static void bnx2x_init_searcher(struct bnx2x *bp) 7801 { 7802 int port = BP_PORT(bp); 7803 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7804 /* T1 hash bits value determines the T1 number of entries */ 7805 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7806 } 7807 7808 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) 7809 { 7810 int rc; 7811 struct bnx2x_func_state_params func_params = {NULL}; 7812 struct bnx2x_func_switch_update_params *switch_update_params = 7813 
&func_params.params.switch_update; 7814 7815 /* Prepare parameters for function state transitions */ 7816 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 7817 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 7818 7819 func_params.f_obj = &bp->func_obj; 7820 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 7821 7822 /* Function parameters */ 7823 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, 7824 &switch_update_params->changes); 7825 if (suspend) 7826 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, 7827 &switch_update_params->changes); 7828 7829 rc = bnx2x_func_state_change(bp, &func_params); 7830 7831 return rc; 7832 } 7833 7834 static int bnx2x_reset_nic_mode(struct bnx2x *bp) 7835 { 7836 int rc, i, port = BP_PORT(bp); 7837 int vlan_en = 0, mac_en[NUM_MACS]; 7838 7839 /* Close input from network */ 7840 if (bp->mf_mode == SINGLE_FUNCTION) { 7841 bnx2x_set_rx_filter(&bp->link_params, 0); 7842 } else { 7843 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN : 7844 NIG_REG_LLH0_FUNC_EN); 7845 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : 7846 NIG_REG_LLH0_FUNC_EN, 0); 7847 for (i = 0; i < NUM_MACS; i++) { 7848 mac_en[i] = REG_RD(bp, port ? 7849 (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7850 4 * i) : 7851 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 7852 4 * i)); 7853 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7854 4 * i) : 7855 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0); 7856 } 7857 } 7858 7859 /* Close BMC to host */ 7860 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : 7861 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0); 7862 7863 /* Suspend Tx switching to the PF. Completion of this ramrod 7864 * further guarantees that all the packets of that PF / child 7865 * VFs in BRB were processed by the Parser, so it is safe to 7866 * change the NIC_MODE register. 7867 */ 7868 rc = bnx2x_func_switch_update(bp, 1); 7869 if (rc) { 7870 BNX2X_ERR("Can't suspend tx-switching!\n"); 7871 return rc; 7872 } 7873 7874 /* Change NIC_MODE register */ 7875 REG_WR(bp, PRS_REG_NIC_MODE, 0); 7876 7877 /* Open input from network */ 7878 if (bp->mf_mode == SINGLE_FUNCTION) { 7879 bnx2x_set_rx_filter(&bp->link_params, 1); 7880 } else { 7881 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : 7882 NIG_REG_LLH0_FUNC_EN, vlan_en); 7883 for (i = 0; i < NUM_MACS; i++) { 7884 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7885 4 * i) : 7886 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 7887 mac_en[i]); 7888 } 7889 } 7890 7891 /* Enable BMC to host */ 7892 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : 7893 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1); 7894 7895 /* Resume Tx switching to the PF */ 7896 rc = bnx2x_func_switch_update(bp, 0); 7897 if (rc) { 7898 BNX2X_ERR("Can't resume tx-switching!\n"); 7899 return rc; 7900 } 7901 7902 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); 7903 return 0; 7904 } 7905 7906 int bnx2x_init_hw_func_cnic(struct bnx2x *bp) 7907 { 7908 int rc; 7909 7910 bnx2x_ilt_init_op_cnic(bp, INITOP_SET); 7911 7912 if (CONFIGURE_NIC_MODE(bp)) { 7913 /* Configure searcher as part of function hw init */ 7914 bnx2x_init_searcher(bp); 7915 7916 /* Reset NIC mode */ 7917 rc = bnx2x_reset_nic_mode(bp); 7918 if (rc) 7919 BNX2X_ERR("Can't change NIC mode!\n"); 7920 return rc; 7921 } 7922 7923 return 0; 7924 } 7925 7926 /* previous driver DMAE transaction may have occurred when pre-boot stage ended 7927 * and boot began, or when kdump kernel was loaded. Either case would invalidate 7928 * the addresses of the transaction, resulting in was-error bit set in the pci 7929 * causing all hw-to-host pcie transactions to timeout. 
If this happened we want 7930 * to clear the interrupt which detected this from the pglueb and the was done 7931 * bit 7932 */ 7933 static void bnx2x_clean_pglue_errors(struct bnx2x *bp) 7934 { 7935 if (!CHIP_IS_E1x(bp)) 7936 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 7937 1 << BP_ABS_FUNC(bp)); 7938 } 7939 7940 static int bnx2x_init_hw_func(struct bnx2x *bp) 7941 { 7942 int port = BP_PORT(bp); 7943 int func = BP_FUNC(bp); 7944 int init_phase = PHASE_PF0 + func; 7945 struct bnx2x_ilt *ilt = BP_ILT(bp); 7946 u16 cdu_ilt_start; 7947 u32 addr, val; 7948 u32 main_mem_base, main_mem_size, main_mem_prty_clr; 7949 int i, main_mem_width, rc; 7950 7951 DP(NETIF_MSG_HW, "starting func init func %d\n", func); 7952 7953 /* FLR cleanup - hmmm */ 7954 if (!CHIP_IS_E1x(bp)) { 7955 rc = bnx2x_pf_flr_clnup(bp); 7956 if (rc) { 7957 bnx2x_fw_dump(bp); 7958 return rc; 7959 } 7960 } 7961 7962 /* set MSI reconfigure capability */ 7963 if (bp->common.int_block == INT_BLOCK_HC) { 7964 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 7965 val = REG_RD(bp, addr); 7966 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 7967 REG_WR(bp, addr, val); 7968 } 7969 7970 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7971 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7972 7973 ilt = BP_ILT(bp); 7974 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7975 7976 if (IS_SRIOV(bp)) 7977 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS; 7978 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); 7979 7980 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes 7981 * those of the VFs, so start line should be reset 7982 */ 7983 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7984 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7985 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; 7986 ilt->lines[cdu_ilt_start + i].page_mapping = 7987 bp->context[i].cxt_mapping; 7988 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; 7989 } 7990 7991 bnx2x_ilt_init_op(bp, INITOP_SET); 7992 7993 if (!CONFIGURE_NIC_MODE(bp)) { 7994 bnx2x_init_searcher(bp); 7995 REG_WR(bp, PRS_REG_NIC_MODE, 0); 7996 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); 7997 } else { 7998 /* Set NIC mode */ 7999 REG_WR(bp, PRS_REG_NIC_MODE, 1); 8000 DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); 8001 } 8002 8003 if (!CHIP_IS_E1x(bp)) { 8004 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 8005 8006 /* Turn on a single ISR mode in IGU if driver is going to use 8007 * INT#x or MSI 8008 */ 8009 if (!(bp->flags & USING_MSIX_FLAG)) 8010 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 8011 /* 8012 * Timers workaround bug: function init part. 
8013 * Need to wait 20msec after initializing ILT, 8014 * needed to make sure there are no requests in 8015 * one of the PXP internal queues with "old" ILT addresses 8016 */ 8017 msleep(20); 8018 /* 8019 * Master enable - Due to WB DMAE writes performed before this 8020 * register is re-initialized as part of the regular function 8021 * init 8022 */ 8023 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 8024 /* Enable the function in IGU */ 8025 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); 8026 } 8027 8028 bp->dmae_ready = 1; 8029 8030 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 8031 8032 bnx2x_clean_pglue_errors(bp); 8033 8034 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 8035 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 8036 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 8037 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 8038 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 8039 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 8040 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 8041 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 8042 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 8043 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 8044 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 8045 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 8046 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 8047 8048 if (!CHIP_IS_E1x(bp)) 8049 REG_WR(bp, QM_REG_PF_EN, 1); 8050 8051 if (!CHIP_IS_E1x(bp)) { 8052 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8053 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8054 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8055 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8056 } 8057 bnx2x_init_block(bp, BLOCK_QM, init_phase); 8058 8059 bnx2x_init_block(bp, BLOCK_TM, init_phase); 8060 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 8061 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */ 8062 8063 bnx2x_iov_init_dq(bp); 8064 8065 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 8066 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 8067 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 8068 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 8069 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 8070 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 8071 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 8072 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 8073 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 8074 if (!CHIP_IS_E1x(bp)) 8075 REG_WR(bp, PBF_REG_DISABLE_PF, 0); 8076 8077 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 8078 8079 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 8080 8081 if (!CHIP_IS_E1x(bp)) 8082 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); 8083 8084 if (IS_MF(bp)) { 8085 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) { 8086 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 8087 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, 8088 bp->mf_ov); 8089 } 8090 } 8091 8092 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 8093 8094 /* HC init per function */ 8095 if (bp->common.int_block == INT_BLOCK_HC) { 8096 if (CHIP_IS_E1H(bp)) { 8097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8098 8099 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 8100 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 8101 } 8102 bnx2x_init_block(bp, BLOCK_HC, init_phase); 8103 8104 } else { 8105 int num_segs, sb_idx, prod_offset; 8106 8107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8108 8109 if (!CHIP_IS_E1x(bp)) { 8110 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 8111 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8112 } 8113 8114 
bnx2x_init_block(bp, BLOCK_IGU, init_phase); 8115 8116 if (!CHIP_IS_E1x(bp)) { 8117 int dsb_idx = 0; 8118 /** 8119 * Producer memory: 8120 * E2 mode: address 0-135 match to the mapping memory; 8121 * 136 - PF0 default prod; 137 - PF1 default prod; 8122 * 138 - PF2 default prod; 139 - PF3 default prod; 8123 * 140 - PF0 attn prod; 141 - PF1 attn prod; 8124 * 142 - PF2 attn prod; 143 - PF3 attn prod; 8125 * 144-147 reserved. 8126 * 8127 * E1.5 mode - In backward compatible mode; 8128 * for non default SB; each even line in the memory 8129 * holds the U producer and each odd line hold 8130 * the C producer. The first 128 producers are for 8131 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 8132 * producers are for the DSB for each PF. 8133 * Each PF has five segments: (the order inside each 8134 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 8135 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 8136 * 144-147 attn prods; 8137 */ 8138 /* non-default-status-blocks */ 8139 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 8140 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 8141 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { 8142 prod_offset = (bp->igu_base_sb + sb_idx) * 8143 num_segs; 8144 8145 for (i = 0; i < num_segs; i++) { 8146 addr = IGU_REG_PROD_CONS_MEMORY + 8147 (prod_offset + i) * 4; 8148 REG_WR(bp, addr, 0); 8149 } 8150 /* send consumer update with value 0 */ 8151 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, 8152 USTORM_ID, 0, IGU_INT_NOP, 1); 8153 bnx2x_igu_clear_sb(bp, 8154 bp->igu_base_sb + sb_idx); 8155 } 8156 8157 /* default-status-blocks */ 8158 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 8159 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 8160 8161 if (CHIP_MODE_IS_4_PORT(bp)) 8162 dsb_idx = BP_FUNC(bp); 8163 else 8164 dsb_idx = BP_VN(bp); 8165 8166 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 8167 IGU_BC_BASE_DSB_PROD + dsb_idx : 8168 IGU_NORM_BASE_DSB_PROD + dsb_idx); 8169 8170 /* 8171 * igu prods come in chunks of E1HVN_MAX (4) - 8172 * does not matters what is the current chip mode 8173 */ 8174 for (i = 0; i < (num_segs * E1HVN_MAX); 8175 i += E1HVN_MAX) { 8176 addr = IGU_REG_PROD_CONS_MEMORY + 8177 (prod_offset + i)*4; 8178 REG_WR(bp, addr, 0); 8179 } 8180 /* send consumer update with 0 */ 8181 if (CHIP_INT_MODE_IS_BC(bp)) { 8182 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8183 USTORM_ID, 0, IGU_INT_NOP, 1); 8184 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8185 CSTORM_ID, 0, IGU_INT_NOP, 1); 8186 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8187 XSTORM_ID, 0, IGU_INT_NOP, 1); 8188 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8189 TSTORM_ID, 0, IGU_INT_NOP, 1); 8190 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8191 ATTENTION_ID, 0, IGU_INT_NOP, 1); 8192 } else { 8193 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8194 USTORM_ID, 0, IGU_INT_NOP, 1); 8195 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8196 ATTENTION_ID, 0, IGU_INT_NOP, 1); 8197 } 8198 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); 8199 8200 /* !!! 
These should become driver const once 8201 rf-tool supports split-68 const */ 8202 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 8203 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 8204 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 8205 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 8206 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 8207 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 8208 } 8209 } 8210 8211 /* Reset PCIE errors for debug */ 8212 REG_WR(bp, 0x2114, 0xffffffff); 8213 REG_WR(bp, 0x2120, 0xffffffff); 8214 8215 if (CHIP_IS_E1x(bp)) { 8216 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 8217 main_mem_base = HC_REG_MAIN_MEMORY + 8218 BP_PORT(bp) * (main_mem_size * 4); 8219 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 8220 main_mem_width = 8; 8221 8222 val = REG_RD(bp, main_mem_prty_clr); 8223 if (val) 8224 DP(NETIF_MSG_HW, 8225 "Hmmm... Parity errors in HC block during function init (0x%x)!\n", 8226 val); 8227 8228 /* Clear "false" parity errors in MSI-X table */ 8229 for (i = main_mem_base; 8230 i < main_mem_base + main_mem_size * 4; 8231 i += main_mem_width) { 8232 bnx2x_read_dmae(bp, i, main_mem_width / 4); 8233 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), 8234 i, main_mem_width / 4); 8235 } 8236 /* Clear HC parity attention */ 8237 REG_RD(bp, main_mem_prty_clr); 8238 } 8239 8240 #ifdef BNX2X_STOP_ON_ERROR 8241 /* Enable STORMs SP logging */ 8242 REG_WR8(bp, BAR_USTRORM_INTMEM + 8243 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8244 REG_WR8(bp, BAR_TSTRORM_INTMEM + 8245 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8246 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8247 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8248 REG_WR8(bp, BAR_XSTRORM_INTMEM + 8249 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8250 #endif 8251 8252 bnx2x_phy_probe(&bp->link_params); 8253 8254 return 0; 8255 } 8256 8257 void bnx2x_free_mem_cnic(struct bnx2x *bp) 8258 { 8259 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); 8260 8261 if (!CHIP_IS_E1x(bp)) 8262 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, 8263 sizeof(struct host_hc_status_block_e2)); 8264 else 8265 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 8266 sizeof(struct host_hc_status_block_e1x)); 8267 8268 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 8269 } 8270 8271 void bnx2x_free_mem(struct bnx2x *bp) 8272 { 8273 int i; 8274 8275 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 8276 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 8277 8278 if (IS_VF(bp)) 8279 return; 8280 8281 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 8282 sizeof(struct host_sp_status_block)); 8283 8284 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 8285 sizeof(struct bnx2x_slowpath)); 8286 8287 for (i = 0; i < L2_ILT_LINES(bp); i++) 8288 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, 8289 bp->context[i].size); 8290 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 8291 8292 BNX2X_FREE(bp->ilt->lines); 8293 8294 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 8295 8296 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 8297 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8298 8299 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 8300 8301 bnx2x_iov_free_mem(bp); 8302 } 8303 8304 int bnx2x_alloc_mem_cnic(struct bnx2x *bp) 8305 { 8306 if (!CHIP_IS_E1x(bp)) { 8307 /* size = the status block + ramrod buffers */ 8308 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, 8309 sizeof(struct host_hc_status_block_e2)); 8310 if (!bp->cnic_sb.e2_sb) 8311 goto alloc_mem_err; 8312 } else { 8313 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, 8314 
sizeof(struct host_hc_status_block_e1x)); 8315 if (!bp->cnic_sb.e1x_sb) 8316 goto alloc_mem_err; 8317 } 8318 8319 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { 8320 /* allocate searcher T2 table, as it wasn't allocated before */ 8321 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); 8322 if (!bp->t2) 8323 goto alloc_mem_err; 8324 } 8325 8326 /* write address to which L5 should insert its values */ 8327 bp->cnic_eth_dev.addr_drv_info_to_mcp = 8328 &bp->slowpath->drv_info_to_mcp; 8329 8330 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) 8331 goto alloc_mem_err; 8332 8333 return 0; 8334 8335 alloc_mem_err: 8336 bnx2x_free_mem_cnic(bp); 8337 BNX2X_ERR("Can't allocate memory\n"); 8338 return -ENOMEM; 8339 } 8340 8341 int bnx2x_alloc_mem(struct bnx2x *bp) 8342 { 8343 int i, allocated, context_size; 8344 8345 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { 8346 /* allocate searcher T2 table */ 8347 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); 8348 if (!bp->t2) 8349 goto alloc_mem_err; 8350 } 8351 8352 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, 8353 sizeof(struct host_sp_status_block)); 8354 if (!bp->def_status_blk) 8355 goto alloc_mem_err; 8356 8357 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, 8358 sizeof(struct bnx2x_slowpath)); 8359 if (!bp->slowpath) 8360 goto alloc_mem_err; 8361 8362 /* Allocate memory for CDU context: 8363 * This memory is allocated separately and not in the generic ILT 8364 * functions because CDU differs in few aspects: 8365 * 1. There are multiple entities allocating memory for context - 8366 * 'regular' driver, CNIC and SRIOV driver. Each separately controls 8367 * its own ILT lines. 8368 * 2. Since CDU page-size is not a single 4KB page (which is the case 8369 * for the other ILT clients), to be efficient we want to support 8370 * allocation of sub-page-size in the last entry. 8371 * 3. Context pointers are used by the driver to pass to FW / update 8372 * the context (for the other ILT clients the pointers are used just to 8373 * free the memory during unload). 
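 * The allocation loop below therefore splits the total context size into
 * CDU_ILT_PAGE_SZ-sized chunks, with a possibly smaller final chunk.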
8374 */ 8375 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 8376 8377 for (i = 0, allocated = 0; allocated < context_size; i++) { 8378 bp->context[i].size = min(CDU_ILT_PAGE_SZ, 8379 (context_size - allocated)); 8380 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, 8381 bp->context[i].size); 8382 if (!bp->context[i].vcxt) 8383 goto alloc_mem_err; 8384 allocated += bp->context[i].size; 8385 } 8386 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), 8387 GFP_KERNEL); 8388 if (!bp->ilt->lines) 8389 goto alloc_mem_err; 8390 8391 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 8392 goto alloc_mem_err; 8393 8394 if (bnx2x_iov_alloc_mem(bp)) 8395 goto alloc_mem_err; 8396 8397 /* Slow path ring */ 8398 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); 8399 if (!bp->spq) 8400 goto alloc_mem_err; 8401 8402 /* EQ */ 8403 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, 8404 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8405 if (!bp->eq_ring) 8406 goto alloc_mem_err; 8407 8408 return 0; 8409 8410 alloc_mem_err: 8411 bnx2x_free_mem(bp); 8412 BNX2X_ERR("Can't allocate memory\n"); 8413 return -ENOMEM; 8414 } 8415 8416 /* 8417 * Init service functions 8418 */ 8419 8420 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, 8421 struct bnx2x_vlan_mac_obj *obj, bool set, 8422 int mac_type, unsigned long *ramrod_flags) 8423 { 8424 int rc; 8425 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 8426 8427 memset(&ramrod_param, 0, sizeof(ramrod_param)); 8428 8429 /* Fill general parameters */ 8430 ramrod_param.vlan_mac_obj = obj; 8431 ramrod_param.ramrod_flags = *ramrod_flags; 8432 8433 /* Fill a user request section if needed */ 8434 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 8435 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 8436 8437 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 8438 8439 /* Set the command: ADD or DEL */ 8440 if (set) 8441 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 8442 else 8443 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; 8444 } 8445 8446 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 8447 8448 if (rc == -EEXIST) { 8449 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); 8450 /* do not treat adding same MAC as error */ 8451 rc = 0; 8452 } else if (rc < 0) 8453 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del")); 8454 8455 return rc; 8456 } 8457 8458 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, 8459 struct bnx2x_vlan_mac_obj *obj, bool set, 8460 unsigned long *ramrod_flags) 8461 { 8462 int rc; 8463 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 8464 8465 memset(&ramrod_param, 0, sizeof(ramrod_param)); 8466 8467 /* Fill general parameters */ 8468 ramrod_param.vlan_mac_obj = obj; 8469 ramrod_param.ramrod_flags = *ramrod_flags; 8470 8471 /* Fill a user request section if needed */ 8472 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 8473 ramrod_param.user_req.u.vlan.vlan = vlan; 8474 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags); 8475 /* Set the command: ADD or DEL */ 8476 if (set) 8477 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 8478 else 8479 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; 8480 } 8481 8482 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 8483 8484 if (rc == -EEXIST) { 8485 /* Do not treat adding same vlan as error. */ 8486 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); 8487 rc = 0; 8488 } else if (rc < 0) { 8489 BNX2X_ERR("%s VLAN failed\n", (set ? 
"Set" : "Del")); 8490 } 8491 8492 return rc; 8493 } 8494 8495 void bnx2x_clear_vlan_info(struct bnx2x *bp) 8496 { 8497 struct bnx2x_vlan_entry *vlan; 8498 8499 /* Mark that hw forgot all entries */ 8500 list_for_each_entry(vlan, &bp->vlan_reg, link) 8501 vlan->hw = false; 8502 8503 bp->vlan_cnt = 0; 8504 } 8505 8506 static int bnx2x_del_all_vlans(struct bnx2x *bp) 8507 { 8508 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; 8509 unsigned long ramrod_flags = 0, vlan_flags = 0; 8510 int rc; 8511 8512 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8513 __set_bit(BNX2X_VLAN, &vlan_flags); 8514 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags); 8515 if (rc) 8516 return rc; 8517 8518 bnx2x_clear_vlan_info(bp); 8519 8520 return 0; 8521 } 8522 8523 int bnx2x_del_all_macs(struct bnx2x *bp, 8524 struct bnx2x_vlan_mac_obj *mac_obj, 8525 int mac_type, bool wait_for_comp) 8526 { 8527 int rc; 8528 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 8529 8530 /* Wait for completion of requested */ 8531 if (wait_for_comp) 8532 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8533 8534 /* Set the mac type of addresses we want to clear */ 8535 __set_bit(mac_type, &vlan_mac_flags); 8536 8537 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); 8538 if (rc < 0) 8539 BNX2X_ERR("Failed to delete MACs: %d\n", rc); 8540 8541 return rc; 8542 } 8543 8544 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) 8545 { 8546 if (IS_PF(bp)) { 8547 unsigned long ramrod_flags = 0; 8548 8549 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 8550 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8551 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, 8552 &bp->sp_objs->mac_obj, set, 8553 BNX2X_ETH_MAC, &ramrod_flags); 8554 } else { /* vf */ 8555 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, 8556 bp->fp->index, set); 8557 } 8558 } 8559 8560 int bnx2x_setup_leading(struct bnx2x *bp) 8561 { 8562 if (IS_PF(bp)) 8563 return bnx2x_setup_queue(bp, &bp->fp[0], true); 8564 else /* VF */ 8565 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); 8566 } 8567 8568 /** 8569 * bnx2x_set_int_mode - configure interrupt mode 8570 * 8571 * @bp: driver handle 8572 * 8573 * In case of MSI-X it will also try to enable MSI-X. 
8574 */ 8575 int bnx2x_set_int_mode(struct bnx2x *bp) 8576 { 8577 int rc = 0; 8578 8579 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { 8580 BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); 8581 return -EINVAL; 8582 } 8583 8584 switch (int_mode) { 8585 case BNX2X_INT_MODE_MSIX: 8586 /* attempt to enable msix */ 8587 rc = bnx2x_enable_msix(bp); 8588 8589 /* msix attained */ 8590 if (!rc) 8591 return 0; 8592 8593 /* vfs use only msix */ 8594 if (rc && IS_VF(bp)) 8595 return rc; 8596 8597 /* failed to enable multiple MSI-X */ 8598 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 8599 bp->num_queues, 8600 1 + bp->num_cnic_queues); 8601 8602 fallthrough; 8603 case BNX2X_INT_MODE_MSI: 8604 bnx2x_enable_msi(bp); 8605 8606 fallthrough; 8607 case BNX2X_INT_MODE_INTX: 8608 bp->num_ethernet_queues = 1; 8609 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 8610 BNX2X_DEV_INFO("set number of queues to 1\n"); 8611 break; 8612 default: 8613 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n"); 8614 return -EINVAL; 8615 } 8616 return 0; 8617 } 8618 8619 /* must be called prior to any HW initializations */ 8620 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) 8621 { 8622 if (IS_SRIOV(bp)) 8623 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS; 8624 return L2_ILT_LINES(bp); 8625 } 8626 8627 void bnx2x_ilt_set_info(struct bnx2x *bp) 8628 { 8629 struct ilt_client_info *ilt_client; 8630 struct bnx2x_ilt *ilt = BP_ILT(bp); 8631 u16 line = 0; 8632 8633 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); 8634 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); 8635 8636 /* CDU */ 8637 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 8638 ilt_client->client_num = ILT_CLIENT_CDU; 8639 ilt_client->page_size = CDU_ILT_PAGE_SZ; 8640 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 8641 ilt_client->start = line; 8642 line += bnx2x_cid_ilt_lines(bp); 8643 8644 if (CNIC_SUPPORT(bp)) 8645 line += CNIC_ILT_LINES; 8646 ilt_client->end = line - 1; 8647 8648 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8649 ilt_client->start, 8650 ilt_client->end, 8651 ilt_client->page_size, 8652 ilt_client->flags, 8653 ilog2(ilt_client->page_size >> 12)); 8654 8655 /* QM */ 8656 if (QM_INIT(bp->qm_cid_count)) { 8657 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 8658 ilt_client->client_num = ILT_CLIENT_QM; 8659 ilt_client->page_size = QM_ILT_PAGE_SZ; 8660 ilt_client->flags = 0; 8661 ilt_client->start = line; 8662 8663 /* 4 bytes for each cid */ 8664 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 8665 QM_ILT_PAGE_SZ); 8666 8667 ilt_client->end = line - 1; 8668 8669 DP(NETIF_MSG_IFUP, 8670 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8671 ilt_client->start, 8672 ilt_client->end, 8673 ilt_client->page_size, 8674 ilt_client->flags, 8675 ilog2(ilt_client->page_size >> 12)); 8676 } 8677 8678 if (CNIC_SUPPORT(bp)) { 8679 /* SRC */ 8680 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 8681 ilt_client->client_num = ILT_CLIENT_SRC; 8682 ilt_client->page_size = SRC_ILT_PAGE_SZ; 8683 ilt_client->flags = 0; 8684 ilt_client->start = line; 8685 line += SRC_ILT_LINES; 8686 ilt_client->end = line - 1; 8687 8688 DP(NETIF_MSG_IFUP, 8689 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8690 ilt_client->start, 8691 ilt_client->end, 8692 ilt_client->page_size, 8693 ilt_client->flags, 8694 ilog2(ilt_client->page_size >> 12)); 8695 8696 /* TM */ 8697 ilt_client = 
&ilt->clients[ILT_CLIENT_TM]; 8698 ilt_client->client_num = ILT_CLIENT_TM; 8699 ilt_client->page_size = TM_ILT_PAGE_SZ; 8700 ilt_client->flags = 0; 8701 ilt_client->start = line; 8702 line += TM_ILT_LINES; 8703 ilt_client->end = line - 1; 8704 8705 DP(NETIF_MSG_IFUP, 8706 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8707 ilt_client->start, 8708 ilt_client->end, 8709 ilt_client->page_size, 8710 ilt_client->flags, 8711 ilog2(ilt_client->page_size >> 12)); 8712 } 8713 8714 BUG_ON(line > ILT_MAX_LINES); 8715 } 8716 8717 /** 8718 * bnx2x_pf_q_prep_init - prepare INIT transition parameters 8719 * 8720 * @bp: driver handle 8721 * @fp: pointer to fastpath 8722 * @init_params: pointer to parameters structure 8723 * 8724 * parameters configured: 8725 * - HC configuration 8726 * - Queue's CDU context 8727 */ 8728 static void bnx2x_pf_q_prep_init(struct bnx2x *bp, 8729 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 8730 { 8731 u8 cos; 8732 int cxt_index, cxt_offset; 8733 8734 /* FCoE Queue uses Default SB, thus has no HC capabilities */ 8735 if (!IS_FCOE_FP(fp)) { 8736 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 8737 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); 8738 8739 /* If HC is supported, enable host coalescing in the transition 8740 * to INIT state. 8741 */ 8742 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); 8743 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); 8744 8745 /* HC rate */ 8746 init_params->rx.hc_rate = bp->rx_ticks ? 8747 (1000000 / bp->rx_ticks) : 0; 8748 init_params->tx.hc_rate = bp->tx_ticks ? 8749 (1000000 / bp->tx_ticks) : 0; 8750 8751 /* FW SB ID */ 8752 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = 8753 fp->fw_sb_id; 8754 8755 /* 8756 * CQ index among the SB indices: FCoE clients use the default 8757 * SB, therefore it's different.
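 * For regular L2 queues the Rx completion index is HC_INDEX_ETH_RX_CQ_CONS
 * and the Tx completion index is HC_INDEX_ETH_FIRST_TX_CQ_CONS, as assigned
 * just below.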
8758 */ 8759 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 8760 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 8761 } 8762 8763 /* set maximum number of COSs supported by this queue */ 8764 init_params->max_cos = fp->max_cos; 8765 8766 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", 8767 fp->index, init_params->max_cos); 8768 8769 /* set the context pointers queue object */ 8770 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 8771 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; 8772 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * 8773 ILT_PAGE_CIDS); 8774 init_params->cxts[cos] = 8775 &bp->context[cxt_index].vcxt[cxt_offset].eth; 8776 } 8777 } 8778 8779 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 8780 struct bnx2x_queue_state_params *q_params, 8781 struct bnx2x_queue_setup_tx_only_params *tx_only_params, 8782 int tx_index, bool leading) 8783 { 8784 memset(tx_only_params, 0, sizeof(*tx_only_params)); 8785 8786 /* Set the command */ 8787 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; 8788 8789 /* Set tx-only QUEUE flags: don't zero statistics */ 8790 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); 8791 8792 /* choose the index of the cid to send the slow path on */ 8793 tx_only_params->cid_index = tx_index; 8794 8795 /* Set general TX_ONLY_SETUP parameters */ 8796 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); 8797 8798 /* Set Tx TX_ONLY_SETUP parameters */ 8799 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); 8800 8801 DP(NETIF_MSG_IFUP, 8802 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", 8803 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], 8804 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, 8805 tx_only_params->gen_params.spcl_id, tx_only_params->flags); 8806 8807 /* send the ramrod */ 8808 return bnx2x_queue_state_change(bp, q_params); 8809 } 8810 8811 /** 8812 * bnx2x_setup_queue - setup queue 8813 * 8814 * @bp: driver handle 8815 * @fp: pointer to fastpath 8816 * @leading: is leading 8817 * 8818 * This function performs 2 steps in a Queue state machine 8819 * actually: 1) RESET->INIT 2) INIT->SETUP 8820 */ 8821 8822 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, 8823 bool leading) 8824 { 8825 struct bnx2x_queue_state_params q_params = {NULL}; 8826 struct bnx2x_queue_setup_params *setup_params = 8827 &q_params.params.setup; 8828 struct bnx2x_queue_setup_tx_only_params *tx_only_params = 8829 &q_params.params.tx_only; 8830 int rc; 8831 u8 tx_index; 8832 8833 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); 8834 8835 /* reset IGU state skip FCoE L2 queue */ 8836 if (!IS_FCOE_FP(fp)) 8837 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 8838 IGU_INT_ENABLE, 0); 8839 8840 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 8841 /* We want to wait for completion in this context */ 8842 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 8843 8844 /* Prepare the INIT parameters */ 8845 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); 8846 8847 /* Set the command */ 8848 q_params.cmd = BNX2X_Q_CMD_INIT; 8849 8850 /* Change the state to INIT */ 8851 rc = bnx2x_queue_state_change(bp, &q_params); 8852 if (rc) { 8853 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); 8854 return rc; 8855 } 8856 8857 DP(NETIF_MSG_IFUP, "init complete\n"); 8858 8859 /* Now move the Queue to the SETUP state... 
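 * (the INIT transition completed above; after SETUP, the remaining CoS
 * indices are brought up with TX_ONLY setup ramrods in the loop below)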
*/ 8860 memset(setup_params, 0, sizeof(*setup_params)); 8861 8862 /* Set QUEUE flags */ 8863 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); 8864 8865 /* Set general SETUP parameters */ 8866 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, 8867 FIRST_TX_COS_INDEX); 8868 8869 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, 8870 &setup_params->rxq_params); 8871 8872 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, 8873 FIRST_TX_COS_INDEX); 8874 8875 /* Set the command */ 8876 q_params.cmd = BNX2X_Q_CMD_SETUP; 8877 8878 if (IS_FCOE_FP(fp)) 8879 bp->fcoe_init = true; 8880 8881 /* Change the state to SETUP */ 8882 rc = bnx2x_queue_state_change(bp, &q_params); 8883 if (rc) { 8884 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); 8885 return rc; 8886 } 8887 8888 /* loop through the relevant tx-only indices */ 8889 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 8890 tx_index < fp->max_cos; 8891 tx_index++) { 8892 8893 /* prepare and send tx-only ramrod*/ 8894 rc = bnx2x_setup_tx_only(bp, fp, &q_params, 8895 tx_only_params, tx_index, leading); 8896 if (rc) { 8897 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", 8898 fp->index, tx_index); 8899 return rc; 8900 } 8901 } 8902 8903 return rc; 8904 } 8905 8906 static int bnx2x_stop_queue(struct bnx2x *bp, int index) 8907 { 8908 struct bnx2x_fastpath *fp = &bp->fp[index]; 8909 struct bnx2x_fp_txdata *txdata; 8910 struct bnx2x_queue_state_params q_params = {NULL}; 8911 int rc, tx_index; 8912 8913 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); 8914 8915 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 8916 /* We want to wait for completion in this context */ 8917 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 8918 8919 /* close tx-only connections */ 8920 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 8921 tx_index < fp->max_cos; 8922 tx_index++){ 8923 8924 /* ascertain this is a normal queue*/ 8925 txdata = fp->txdata_ptr[tx_index]; 8926 8927 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", 8928 txdata->txq_index); 8929 8930 /* send halt terminate on tx-only connection */ 8931 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8932 memset(&q_params.params.terminate, 0, 8933 sizeof(q_params.params.terminate)); 8934 q_params.params.terminate.cid_index = tx_index; 8935 8936 rc = bnx2x_queue_state_change(bp, &q_params); 8937 if (rc) 8938 return rc; 8939 8940 /* send halt terminate on tx-only connection */ 8941 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8942 memset(&q_params.params.cfc_del, 0, 8943 sizeof(q_params.params.cfc_del)); 8944 q_params.params.cfc_del.cid_index = tx_index; 8945 rc = bnx2x_queue_state_change(bp, &q_params); 8946 if (rc) 8947 return rc; 8948 } 8949 /* Stop the primary connection: */ 8950 /* ...halt the connection */ 8951 q_params.cmd = BNX2X_Q_CMD_HALT; 8952 rc = bnx2x_queue_state_change(bp, &q_params); 8953 if (rc) 8954 return rc; 8955 8956 /* ...terminate the connection */ 8957 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8958 memset(&q_params.params.terminate, 0, 8959 sizeof(q_params.params.terminate)); 8960 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 8961 rc = bnx2x_queue_state_change(bp, &q_params); 8962 if (rc) 8963 return rc; 8964 /* ...delete cfc entry */ 8965 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8966 memset(&q_params.params.cfc_del, 0, 8967 sizeof(q_params.params.cfc_del)); 8968 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 8969 return bnx2x_queue_state_change(bp, &q_params); 8970 } 8971 8972 static void bnx2x_reset_func(struct bnx2x *bp) 8973 { 8974 int port = BP_PORT(bp); 8975 int 
func = BP_FUNC(bp); 8976 int i; 8977 8978 /* Disable the function in the FW */ 8979 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 8980 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 8981 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 8982 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 8983 8984 /* FP SBs */ 8985 for_each_eth_queue(bp, i) { 8986 struct bnx2x_fastpath *fp = &bp->fp[i]; 8987 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8988 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 8989 SB_DISABLED); 8990 } 8991 8992 if (CNIC_LOADED(bp)) 8993 /* CNIC SB */ 8994 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8995 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 8996 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED); 8997 8998 /* SP SB */ 8999 REG_WR8(bp, BAR_CSTRORM_INTMEM + 9000 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 9001 SB_DISABLED); 9002 9003 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 9004 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 9005 0); 9006 9007 /* Configure IGU */ 9008 if (bp->common.int_block == INT_BLOCK_HC) { 9009 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 9010 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 9011 } else { 9012 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 9013 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 9014 } 9015 9016 if (CNIC_LOADED(bp)) { 9017 /* Disable Timer scan */ 9018 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 9019 /* 9020 * Wait for at least 10ms and up to 2 second for the timers 9021 * scan to complete 9022 */ 9023 for (i = 0; i < 200; i++) { 9024 usleep_range(10000, 20000); 9025 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 9026 break; 9027 } 9028 } 9029 /* Clear ILT */ 9030 bnx2x_clear_func_ilt(bp, func); 9031 9032 /* Timers workaround bug for E2: if this is vnic-3, 9033 * we need to set the entire ilt range for this timers. 9034 */ 9035 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { 9036 struct ilt_client_info ilt_cli; 9037 /* use dummy TM client */ 9038 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 9039 ilt_cli.start = 0; 9040 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 9041 ilt_cli.client_num = ILT_CLIENT_TM; 9042 9043 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); 9044 } 9045 9046 /* this assumes that reset_port() called before reset_func()*/ 9047 if (!CHIP_IS_E1x(bp)) 9048 bnx2x_pf_disable(bp); 9049 9050 bp->dmae_ready = 0; 9051 } 9052 9053 static void bnx2x_reset_port(struct bnx2x *bp) 9054 { 9055 int port = BP_PORT(bp); 9056 u32 val; 9057 9058 /* Reset physical Link */ 9059 bnx2x__link_reset(bp); 9060 9061 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 9062 9063 /* Do not rcv packets to BRB */ 9064 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 9065 /* Do not direct rcv packets that are not for MCP to the BRB */ 9066 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 9067 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 9068 9069 /* Configure AEU */ 9070 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 9071 9072 msleep(100); 9073 /* Check for BRB port occupancy */ 9074 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 9075 if (val) 9076 DP(NETIF_MSG_IFDOWN, 9077 "BRB1 is not empty %d blocks are occupied\n", val); 9078 9079 /* TODO: Close Doorbell port? 
*/ 9080 } 9081 9082 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 9083 { 9084 struct bnx2x_func_state_params func_params = {NULL}; 9085 9086 /* Prepare parameters for function state transitions */ 9087 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 9088 9089 func_params.f_obj = &bp->func_obj; 9090 func_params.cmd = BNX2X_F_CMD_HW_RESET; 9091 9092 func_params.params.hw_init.load_phase = load_code; 9093 9094 return bnx2x_func_state_change(bp, &func_params); 9095 } 9096 9097 static int bnx2x_func_stop(struct bnx2x *bp) 9098 { 9099 struct bnx2x_func_state_params func_params = {NULL}; 9100 int rc; 9101 9102 /* Prepare parameters for function state transitions */ 9103 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 9104 func_params.f_obj = &bp->func_obj; 9105 func_params.cmd = BNX2X_F_CMD_STOP; 9106 9107 /* 9108 * Try to stop the function the 'good way'. If fails (in case 9109 * of a parity error during bnx2x_chip_cleanup()) and we are 9110 * not in a debug mode, perform a state transaction in order to 9111 * enable further HW_RESET transaction. 9112 */ 9113 rc = bnx2x_func_state_change(bp, &func_params); 9114 if (rc) { 9115 #ifdef BNX2X_STOP_ON_ERROR 9116 return rc; 9117 #else 9118 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); 9119 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 9120 return bnx2x_func_state_change(bp, &func_params); 9121 #endif 9122 } 9123 9124 return 0; 9125 } 9126 9127 /** 9128 * bnx2x_send_unload_req - request unload mode from the MCP. 9129 * 9130 * @bp: driver handle 9131 * @unload_mode: requested function's unload mode 9132 * 9133 * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 9134 */ 9135 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) 9136 { 9137 u32 reset_code = 0; 9138 int port = BP_PORT(bp); 9139 9140 /* Select the UNLOAD request mode */ 9141 if (unload_mode == UNLOAD_NORMAL) 9142 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 9143 9144 else if (bp->flags & NO_WOL_FLAG) 9145 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 9146 9147 else if (bp->wol) { 9148 u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; 9149 u8 *mac_addr = bp->dev->dev_addr; 9150 struct pci_dev *pdev = bp->pdev; 9151 u32 val; 9152 u16 pmc; 9153 9154 /* The mac address is written to entries 1-4 to 9155 * preserve entry 0 which is used by the PMF 9156 */ 9157 u8 entry = (BP_VN(bp) + 1)*8; 9158 9159 val = (mac_addr[0] << 8) | mac_addr[1]; 9160 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 9161 9162 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 9163 (mac_addr[4] << 8) | mac_addr[5]; 9164 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 9165 9166 /* Enable the PME and clear the status */ 9167 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); 9168 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 9169 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); 9170 9171 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 9172 9173 } else 9174 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 9175 9176 /* Send the request to the MCP */ 9177 if (!BP_NOMCP(bp)) 9178 reset_code = bnx2x_fw_command(bp, reset_code, 0); 9179 else { 9180 int path = BP_PATH(bp); 9181 9182 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", 9183 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 9184 bnx2x_load_count[path][2]); 9185 bnx2x_load_count[path][0]--; 9186 bnx2x_load_count[path][1 + port]--; 9187 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", 9188 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 9189 bnx2x_load_count[path][2]); 9190 if (bnx2x_load_count[path][0] == 0) 9191 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 9192 else if (bnx2x_load_count[path][1 + port] == 0) 9193 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 9194 else 9195 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 9196 } 9197 9198 return reset_code; 9199 } 9200 9201 /** 9202 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 9203 * 9204 * @bp: driver handle 9205 * @keep_link: true iff link should be kept up 9206 */ 9207 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) 9208 { 9209 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 9210 9211 /* Report UNLOAD_DONE to MCP */ 9212 if (!BP_NOMCP(bp)) 9213 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 9214 } 9215 9216 static int bnx2x_func_wait_started(struct bnx2x *bp) 9217 { 9218 int tout = 50; 9219 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 9220 9221 if (!bp->port.pmf) 9222 return 0; 9223 9224 /* 9225 * (assumption: No Attention from MCP at this stage) 9226 * PMF probably in the middle of TX disable/enable transaction 9227 * 1. Sync IRS for default SB 9228 * 2. Sync SP queue - this guarantees us that attention handling started 9229 * 3. Wait, that TX disable/enable transaction completes 9230 * 9231 * 1+2 guarantee that if DCBx attention was scheduled it already changed 9232 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 9233 * received completion for the transaction the state is TX_STOPPED. 9234 * State will return to STARTED after completion of TX_STOPPED-->STARTED 9235 * transaction. 
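 *
 * Steps 1 and 2 correspond to the synchronize_irq() and flush_workqueue()
 * calls below; step 3 is the polling loop on bnx2x_func_get_state().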
9236 */ 9237 9238 /* make sure default SB ISR is done */ 9239 if (msix) 9240 synchronize_irq(bp->msix_table[0].vector); 9241 else 9242 synchronize_irq(bp->pdev->irq); 9243 9244 flush_workqueue(bnx2x_wq); 9245 flush_workqueue(bnx2x_iov_wq); 9246 9247 while (bnx2x_func_get_state(bp, &bp->func_obj) != 9248 BNX2X_F_STATE_STARTED && tout--) 9249 msleep(20); 9250 9251 if (bnx2x_func_get_state(bp, &bp->func_obj) != 9252 BNX2X_F_STATE_STARTED) { 9253 #ifdef BNX2X_STOP_ON_ERROR 9254 BNX2X_ERR("Wrong function state\n"); 9255 return -EBUSY; 9256 #else 9257 /* 9258 * Failed to complete the transaction in a "good way" 9259 * Force both transactions with CLR bit 9260 */ 9261 struct bnx2x_func_state_params func_params = {NULL}; 9262 9263 DP(NETIF_MSG_IFDOWN, 9264 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); 9265 9266 func_params.f_obj = &bp->func_obj; 9267 __set_bit(RAMROD_DRV_CLR_ONLY, 9268 &func_params.ramrod_flags); 9269 9270 /* STARTED-->TX_ST0PPED */ 9271 func_params.cmd = BNX2X_F_CMD_TX_STOP; 9272 bnx2x_func_state_change(bp, &func_params); 9273 9274 /* TX_ST0PPED-->STARTED */ 9275 func_params.cmd = BNX2X_F_CMD_TX_START; 9276 return bnx2x_func_state_change(bp, &func_params); 9277 #endif 9278 } 9279 9280 return 0; 9281 } 9282 9283 static void bnx2x_disable_ptp(struct bnx2x *bp) 9284 { 9285 int port = BP_PORT(bp); 9286 9287 /* Disable sending PTP packets to host */ 9288 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : 9289 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); 9290 9291 /* Reset PTP event detection rules */ 9292 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : 9293 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); 9294 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : 9295 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); 9296 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : 9297 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); 9298 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : 9299 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); 9300 9301 /* Disable the PTP feature */ 9302 REG_WR(bp, port ? NIG_REG_P1_PTP_EN : 9303 NIG_REG_P0_PTP_EN, 0x0); 9304 } 9305 9306 /* Called during unload, to stop PTP-related stuff */ 9307 static void bnx2x_stop_ptp(struct bnx2x *bp) 9308 { 9309 /* Cancel PTP work queue. Should be done after the Tx queues are 9310 * drained to prevent additional scheduling. 
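 * (bnx2x_chip_cleanup() satisfies this by calling bnx2x_stop_ptp() only
 * after the Tx queues are cleaned and the FUNC_STOP ramrod has completed.)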
9311 */ 9312 cancel_work_sync(&bp->ptp_task); 9313 9314 if (bp->ptp_tx_skb) { 9315 dev_kfree_skb_any(bp->ptp_tx_skb); 9316 bp->ptp_tx_skb = NULL; 9317 } 9318 9319 /* Disable PTP in HW */ 9320 bnx2x_disable_ptp(bp); 9321 9322 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); 9323 } 9324 9325 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) 9326 { 9327 int port = BP_PORT(bp); 9328 int i, rc = 0; 9329 u8 cos; 9330 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 9331 u32 reset_code; 9332 9333 /* Wait until tx fastpath tasks complete */ 9334 for_each_tx_queue(bp, i) { 9335 struct bnx2x_fastpath *fp = &bp->fp[i]; 9336 9337 for_each_cos_in_tx_queue(fp, cos) 9338 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); 9339 #ifdef BNX2X_STOP_ON_ERROR 9340 if (rc) 9341 return; 9342 #endif 9343 } 9344 9345 /* Give HW time to discard old tx messages */ 9346 usleep_range(1000, 2000); 9347 9348 /* Clean all ETH MACs */ 9349 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, 9350 false); 9351 if (rc < 0) 9352 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); 9353 9354 /* Clean up UC list */ 9355 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, 9356 true); 9357 if (rc < 0) 9358 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 9359 rc); 9360 9361 /* The whole *vlan_obj structure may be not initialized if VLAN 9362 * filtering offload is not supported by hardware. Currently this is 9363 * true for all hardware covered by CHIP_IS_E1x(). 9364 */ 9365 if (!CHIP_IS_E1x(bp)) { 9366 /* Remove all currently configured VLANs */ 9367 rc = bnx2x_del_all_vlans(bp); 9368 if (rc < 0) 9369 BNX2X_ERR("Failed to delete all VLANs\n"); 9370 } 9371 9372 /* Disable LLH */ 9373 if (!CHIP_IS_E1(bp)) 9374 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 9375 9376 /* Set "drop all" (stop Rx). 9377 * We need to take a netif_addr_lock() here in order to prevent 9378 * a race between the completion code and this code. 9379 */ 9380 netif_addr_lock_bh(bp->dev); 9381 /* Schedule the rx_mode command */ 9382 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 9383 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 9384 else if (bp->slowpath) 9385 bnx2x_set_storm_rx_mode(bp); 9386 9387 /* Cleanup multicast configuration */ 9388 rparam.mcast_obj = &bp->mcast_obj; 9389 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 9390 if (rc < 0) 9391 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); 9392 9393 netif_addr_unlock_bh(bp->dev); 9394 9395 bnx2x_iov_chip_cleanup(bp); 9396 9397 /* 9398 * Send the UNLOAD_REQUEST to the MCP. This will return if 9399 * this function should perform FUNC, PORT or COMMON HW 9400 * reset. 
9401 */ 9402 reset_code = bnx2x_send_unload_req(bp, unload_mode); 9403 9404 /* 9405 * (assumption: No Attention from MCP at this stage) 9406 * PMF probably in the middle of TX disable/enable transaction 9407 */ 9408 rc = bnx2x_func_wait_started(bp); 9409 if (rc) { 9410 BNX2X_ERR("bnx2x_func_wait_started failed\n"); 9411 #ifdef BNX2X_STOP_ON_ERROR 9412 return; 9413 #endif 9414 } 9415 9416 /* Close multi and leading connections 9417 * Completions for ramrods are collected in a synchronous way 9418 */ 9419 for_each_eth_queue(bp, i) 9420 if (bnx2x_stop_queue(bp, i)) 9421 #ifdef BNX2X_STOP_ON_ERROR 9422 return; 9423 #else 9424 goto unload_error; 9425 #endif 9426 9427 if (CNIC_LOADED(bp)) { 9428 for_each_cnic_queue(bp, i) 9429 if (bnx2x_stop_queue(bp, i)) 9430 #ifdef BNX2X_STOP_ON_ERROR 9431 return; 9432 #else 9433 goto unload_error; 9434 #endif 9435 } 9436 9437 /* If SP settings didn't get completed so far - something 9438 * very wrong has happened. 9439 */ 9440 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) 9441 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); 9442 9443 #ifndef BNX2X_STOP_ON_ERROR 9444 unload_error: 9445 #endif 9446 rc = bnx2x_func_stop(bp); 9447 if (rc) { 9448 BNX2X_ERR("Function stop failed!\n"); 9449 #ifdef BNX2X_STOP_ON_ERROR 9450 return; 9451 #endif 9452 } 9453 9454 /* stop_ptp should be after the Tx queues are drained to prevent 9455 * scheduling to the cancelled PTP work queue. It should also be after 9456 * the function stop ramrod is sent, since the FW accesses PTP registers 9457 * as part of this ramrod. 9458 */ 9459 if (bp->flags & PTP_SUPPORTED) { 9460 bnx2x_stop_ptp(bp); 9461 if (bp->ptp_clock) { 9462 ptp_clock_unregister(bp->ptp_clock); 9463 bp->ptp_clock = NULL; 9464 } 9465 } 9466 9467 /* Disable HW interrupts, NAPI */ 9468 bnx2x_netif_stop(bp, 1); 9469 /* Delete all NAPI objects */ 9470 bnx2x_del_all_napi(bp); 9471 if (CNIC_LOADED(bp)) 9472 bnx2x_del_all_napi_cnic(bp); 9473 9474 /* Release IRQs */ 9475 bnx2x_free_irq(bp); 9476 9477 /* Reset the chip, unless the PCI function is offline. If we reach this 9478 * point following PCI error handling, it means the device is really 9479 * in a bad state and we're about to remove it, so resetting the chip 9480 * is not a good idea. 9481 */ 9482 if (!pci_channel_offline(bp->pdev)) { 9483 rc = bnx2x_reset_hw(bp, reset_code); 9484 if (rc) 9485 BNX2X_ERR("HW_RESET failed\n"); 9486 } 9487 9488 /* Report UNLOAD_DONE to MCP */ 9489 bnx2x_send_unload_done(bp, keep_link); 9490 } 9491 9492 void bnx2x_disable_close_the_gate(struct bnx2x *bp) 9493 { 9494 u32 val; 9495 9496 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); 9497 9498 if (CHIP_IS_E1(bp)) { 9499 int port = BP_PORT(bp); 9500 u32 addr = port ?
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 9501 MISC_REG_AEU_MASK_ATTN_FUNC_0; 9502 9503 val = REG_RD(bp, addr); 9504 val &= ~(0x300); 9505 REG_WR(bp, addr, val); 9506 } else { 9507 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); 9508 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 9509 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 9510 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); 9511 } 9512 } 9513 9514 /* Close gates #2, #3 and #4: */ 9515 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 9516 { 9517 u32 val; 9518 9519 /* Gates #2 and #4a are closed/opened for "not E1" only */ 9520 if (!CHIP_IS_E1(bp)) { 9521 /* #4 */ 9522 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 9523 /* #2 */ 9524 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 9525 } 9526 9527 /* #3 */ 9528 if (CHIP_IS_E1x(bp)) { 9529 /* Prevent interrupts from HC on both ports */ 9530 val = REG_RD(bp, HC_REG_CONFIG_1); 9531 REG_WR(bp, HC_REG_CONFIG_1, 9532 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 9533 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 9534 9535 val = REG_RD(bp, HC_REG_CONFIG_0); 9536 REG_WR(bp, HC_REG_CONFIG_0, 9537 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 9538 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 9539 } else { 9540 /* Prevent incoming interrupts in IGU */ 9541 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9542 9543 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, 9544 (!close) ? 9545 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 9546 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 9547 } 9548 9549 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", 9550 close ? "closing" : "opening"); 9551 } 9552 9553 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ 9554 9555 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) 9556 { 9557 /* Do some magic... */ 9558 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 9559 *magic_val = val & SHARED_MF_CLP_MAGIC; 9560 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 9561 } 9562 9563 /** 9564 * bnx2x_clp_reset_done - restore the value of the `magic' bit. 9565 * 9566 * @bp: driver handle 9567 * @magic_val: old value of the `magic' bit. 9568 */ 9569 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 9570 { 9571 /* Restore the `magic' bit value... */ 9572 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 9573 MF_CFG_WR(bp, shared_mf_config.clp_mb, 9574 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 9575 } 9576 9577 /** 9578 * bnx2x_reset_mcp_prep - prepare for MCP reset. 9579 * 9580 * @bp: driver handle 9581 * @magic_val: old value of 'magic' bit. 9582 * 9583 * Takes care of CLP configurations. 
9584 */ 9585 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 9586 { 9587 u32 shmem; 9588 u32 validity_offset; 9589 9590 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); 9591 9592 /* Set `magic' bit in order to save MF config */ 9593 if (!CHIP_IS_E1(bp)) 9594 bnx2x_clp_reset_prep(bp, magic_val); 9595 9596 /* Get shmem offset */ 9597 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9598 validity_offset = 9599 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); 9600 9601 /* Clear validity map flags */ 9602 if (shmem > 0) 9603 REG_WR(bp, shmem + validity_offset, 0); 9604 } 9605 9606 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 9607 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 9608 9609 /** 9610 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT 9611 * 9612 * @bp: driver handle 9613 */ 9614 static void bnx2x_mcp_wait_one(struct bnx2x *bp) 9615 { 9616 /* special handling for emulation and FPGA, 9617 wait 10 times longer */ 9618 if (CHIP_REV_IS_SLOW(bp)) 9619 msleep(MCP_ONE_TIMEOUT*10); 9620 else 9621 msleep(MCP_ONE_TIMEOUT); 9622 } 9623 9624 /* 9625 * initializes bp->common.shmem_base and waits for validity signature to appear 9626 */ 9627 static int bnx2x_init_shmem(struct bnx2x *bp) 9628 { 9629 int cnt = 0; 9630 u32 val = 0; 9631 9632 do { 9633 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9634 9635 /* If we read all 0xFFs, means we are in PCI error state and 9636 * should bail out to avoid crashes on adapter's FW reads. 9637 */ 9638 if (bp->common.shmem_base == 0xFFFFFFFF) { 9639 bp->flags |= NO_MCP_FLAG; 9640 return -ENODEV; 9641 } 9642 9643 if (bp->common.shmem_base) { 9644 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9645 if (val & SHR_MEM_VALIDITY_MB) 9646 return 0; 9647 } 9648 9649 bnx2x_mcp_wait_one(bp); 9650 9651 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 9652 9653 BNX2X_ERR("BAD MCP validity signature\n"); 9654 9655 return -ENODEV; 9656 } 9657 9658 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 9659 { 9660 int rc = bnx2x_init_shmem(bp); 9661 9662 /* Restore the `magic' bit value */ 9663 if (!CHIP_IS_E1(bp)) 9664 bnx2x_clp_reset_done(bp, magic_val); 9665 9666 return rc; 9667 } 9668 9669 static void bnx2x_pxp_prep(struct bnx2x *bp) 9670 { 9671 if (!CHIP_IS_E1(bp)) { 9672 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); 9673 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); 9674 } 9675 } 9676 9677 /* 9678 * Reset the whole chip except for: 9679 * - PCIE core 9680 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by 9681 * one reset bit) 9682 * - IGU 9683 * - MISC (including AEU) 9684 * - GRC 9685 * - RBCN, RBCP 9686 */ 9687 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) 9688 { 9689 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 9690 u32 global_bits2, stay_reset2; 9691 9692 /* 9693 * Bits that have to be set in reset_mask2 if we want to reset 'global' 9694 * (per chip) blocks. 9695 */ 9696 global_bits2 = 9697 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 9698 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 9699 9700 /* Don't reset the following blocks. 9701 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 9702 * reset, as in 4 port device they might still be owned 9703 * by the MCP (there is only one leader per path). 
9704 */ 9705 not_reset_mask1 = 9706 MISC_REGISTERS_RESET_REG_1_RST_HC | 9707 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 9708 MISC_REGISTERS_RESET_REG_1_RST_PXP; 9709 9710 not_reset_mask2 = 9711 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 9712 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 9713 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 9714 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 9715 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 9716 MISC_REGISTERS_RESET_REG_2_RST_GRC | 9717 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 9718 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 9719 MISC_REGISTERS_RESET_REG_2_RST_ATC | 9720 MISC_REGISTERS_RESET_REG_2_PGLC | 9721 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 9722 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 9723 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 9724 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 9725 MISC_REGISTERS_RESET_REG_2_UMAC0 | 9726 MISC_REGISTERS_RESET_REG_2_UMAC1; 9727 9728 /* 9729 * Keep the following blocks in reset: 9730 * - all xxMACs are handled by the bnx2x_link code. 9731 */ 9732 stay_reset2 = 9733 MISC_REGISTERS_RESET_REG_2_XMAC | 9734 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 9735 9736 /* Full reset masks according to the chip */ 9737 reset_mask1 = 0xffffffff; 9738 9739 if (CHIP_IS_E1(bp)) 9740 reset_mask2 = 0xffff; 9741 else if (CHIP_IS_E1H(bp)) 9742 reset_mask2 = 0x1ffff; 9743 else if (CHIP_IS_E2(bp)) 9744 reset_mask2 = 0xfffff; 9745 else /* CHIP_IS_E3 */ 9746 reset_mask2 = 0x3ffffff; 9747 9748 /* Don't reset global blocks unless we need to */ 9749 if (!global) 9750 reset_mask2 &= ~global_bits2; 9751 9752 /* 9753 * In case of attention in the QM, we need to reset PXP 9754 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 9755 * because otherwise QM reset would release 'close the gates' shortly 9756 * before resetting the PXP, then the PSWRQ would send a write 9757 * request to PGLUE. Then when PXP is reset, PGLUE would try to 9758 * read the payload data from PSWWR, but PSWWR would not 9759 * respond. The write queue in PGLUE would stuck, dmae commands 9760 * would not return. Therefore it's important to reset the second 9761 * reset register (containing the 9762 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 9763 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 9764 * bit). 9765 */ 9766 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 9767 reset_mask2 & (~not_reset_mask2)); 9768 9769 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 9770 reset_mask1 & (~not_reset_mask1)); 9771 9772 barrier(); 9773 9774 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 9775 reset_mask2 & (~stay_reset2)); 9776 9777 barrier(); 9778 9779 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 9780 } 9781 9782 /** 9783 * bnx2x_er_poll_igu_vq - poll for pending writes bit. 9784 * It should get cleared in no more than 1s. 9785 * 9786 * @bp: driver handle 9787 * 9788 * It should get cleared in no more than 1s. Returns 0 if 9789 * pending writes bit gets cleared. 
9790 */ 9791 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) 9792 { 9793 u32 cnt = 1000; 9794 u32 pend_bits = 0; 9795 9796 do { 9797 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); 9798 9799 if (pend_bits == 0) 9800 break; 9801 9802 usleep_range(1000, 2000); 9803 } while (cnt-- > 0); 9804 9805 if (cnt <= 0) { 9806 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", 9807 pend_bits); 9808 return -EBUSY; 9809 } 9810 9811 return 0; 9812 } 9813 9814 static int bnx2x_process_kill(struct bnx2x *bp, bool global) 9815 { 9816 int cnt = 1000; 9817 u32 val = 0; 9818 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 9819 u32 tags_63_32 = 0; 9820 9821 /* Empty the Tetris buffer, wait for 1s */ 9822 do { 9823 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); 9824 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); 9825 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); 9826 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); 9827 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); 9828 if (CHIP_IS_E3(bp)) 9829 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); 9830 9831 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 9832 ((port_is_idle_0 & 0x1) == 0x1) && 9833 ((port_is_idle_1 & 0x1) == 0x1) && 9834 (pgl_exp_rom2 == 0xffffffff) && 9835 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) 9836 break; 9837 usleep_range(1000, 2000); 9838 } while (cnt-- > 0); 9839 9840 if (cnt <= 0) { 9841 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); 9842 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 9843 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 9844 pgl_exp_rom2); 9845 return -EAGAIN; 9846 } 9847 9848 barrier(); 9849 9850 /* Close gates #2, #3 and #4 */ 9851 bnx2x_set_234_gates(bp, true); 9852 9853 /* Poll for IGU VQs for 57712 and newer chips */ 9854 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) 9855 return -EAGAIN; 9856 9857 /* TBD: Indicate that "process kill" is in progress to MCP */ 9858 9859 /* Clear "unprepared" bit */ 9860 REG_WR(bp, MISC_REG_UNPREPARED, 0); 9861 barrier(); 9862 9863 /* Wait for 1ms to empty GLUE and PCI-E core queues, 9864 * PSWHST, GRC and PSWRD Tetris buffer. 9865 */ 9866 usleep_range(1000, 2000); 9867 9868 /* Prepare to chip reset: */ 9869 /* MCP */ 9870 if (global) 9871 bnx2x_reset_mcp_prep(bp, &val); 9872 9873 /* PXP */ 9874 bnx2x_pxp_prep(bp); 9875 barrier(); 9876 9877 /* reset the chip */ 9878 bnx2x_process_kill_chip_reset(bp, global); 9879 barrier(); 9880 9881 /* clear errors in PGB */ 9882 if (!CHIP_IS_E1x(bp)) 9883 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 9884 9885 /* Recover after reset: */ 9886 /* MCP */ 9887 if (global && bnx2x_reset_mcp_comp(bp, val)) 9888 return -EAGAIN; 9889 9890 /* TBD: Add resetting the NO_MCP mode DB here */ 9891 9892 /* Open the gates #2, #3 and #4 */ 9893 bnx2x_set_234_gates(bp, false); 9894 9895 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a 9896 * reset state, re-enable attentions. 
*/ 9897 9898 return 0; 9899 } 9900 9901 static int bnx2x_leader_reset(struct bnx2x *bp) 9902 { 9903 int rc = 0; 9904 bool global = bnx2x_reset_is_global(bp); 9905 u32 load_code; 9906 9907 /* if not going to reset MCP - load "fake" driver to reset HW while 9908 * driver is owner of the HW 9909 */ 9910 if (!global && !BP_NOMCP(bp)) { 9911 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 9912 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 9913 if (!load_code) { 9914 BNX2X_ERR("MCP response failure, aborting\n"); 9915 rc = -EAGAIN; 9916 goto exit_leader_reset; 9917 } 9918 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 9919 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 9920 BNX2X_ERR("MCP unexpected resp, aborting\n"); 9921 rc = -EAGAIN; 9922 goto exit_leader_reset2; 9923 } 9924 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 9925 if (!load_code) { 9926 BNX2X_ERR("MCP response failure, aborting\n"); 9927 rc = -EAGAIN; 9928 goto exit_leader_reset2; 9929 } 9930 } 9931 9932 /* Try to recover after the failure */ 9933 if (bnx2x_process_kill(bp, global)) { 9934 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n", 9935 BP_PATH(bp)); 9936 rc = -EAGAIN; 9937 goto exit_leader_reset2; 9938 } 9939 9940 /* 9941 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver 9942 * state. 9943 */ 9944 bnx2x_set_reset_done(bp); 9945 if (global) 9946 bnx2x_clear_reset_global(bp); 9947 9948 exit_leader_reset2: 9949 /* unload "fake driver" if it was loaded */ 9950 if (!global && !BP_NOMCP(bp)) { 9951 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 9952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 9953 } 9954 exit_leader_reset: 9955 bp->is_leader = 0; 9956 bnx2x_release_leader_lock(bp); 9957 smp_mb(); 9958 return rc; 9959 } 9960 9961 static void bnx2x_recovery_failed(struct bnx2x *bp) 9962 { 9963 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); 9964 9965 /* Disconnect this device */ 9966 netif_device_detach(bp->dev); 9967 9968 /* 9969 * Block ifup for all function on this engine until "process kill" 9970 * or power cycle. 9971 */ 9972 bnx2x_set_reset_in_progress(bp); 9973 9974 /* Shut down the power */ 9975 bnx2x_set_power_state(bp, PCI_D3hot); 9976 9977 bp->recovery_state = BNX2X_RECOVERY_FAILED; 9978 9979 smp_mb(); 9980 } 9981 9982 /* 9983 * Assumption: runs under rtnl lock. This together with the fact 9984 * that it's called only from bnx2x_sp_rtnl() ensure that it 9985 * will never be called when netif_running(bp->dev) is false. 9986 */ 9987 static void bnx2x_parity_recover(struct bnx2x *bp) 9988 { 9989 u32 error_recovered, error_unrecovered; 9990 bool is_parity, global = false; 9991 #ifdef CONFIG_BNX2X_SRIOV 9992 int vf_idx; 9993 9994 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { 9995 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 9996 9997 if (vf) 9998 vf->state = VF_LOST; 9999 } 10000 #endif 10001 DP(NETIF_MSG_HW, "Handling parity\n"); 10002 while (1) { 10003 switch (bp->recovery_state) { 10004 case BNX2X_RECOVERY_INIT: 10005 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); 10006 is_parity = bnx2x_chk_parity_attn(bp, &global, false); 10007 WARN_ON(!is_parity); 10008 10009 /* Try to get a LEADER_LOCK HW lock */ 10010 if (bnx2x_trylock_leader_lock(bp)) { 10011 bnx2x_set_reset_in_progress(bp); 10012 /* 10013 * Check if there is a global attention and if 10014 * there was a global attention, set the global 10015 * reset bit. 
10016 */ 10017 10018 if (global) 10019 bnx2x_set_reset_global(bp); 10020 10021 bp->is_leader = 1; 10022 } 10023 10024 /* Stop the driver */ 10025 /* If interface has been removed - break */ 10026 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) 10027 return; 10028 10029 bp->recovery_state = BNX2X_RECOVERY_WAIT; 10030 10031 /* Ensure "is_leader", MCP command sequence and 10032 * "recovery_state" update values are seen on other 10033 * CPUs. 10034 */ 10035 smp_mb(); 10036 break; 10037 10038 case BNX2X_RECOVERY_WAIT: 10039 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); 10040 if (bp->is_leader) { 10041 int other_engine = BP_PATH(bp) ? 0 : 1; 10042 bool other_load_status = 10043 bnx2x_get_load_status(bp, other_engine); 10044 bool load_status = 10045 bnx2x_get_load_status(bp, BP_PATH(bp)); 10046 global = bnx2x_reset_is_global(bp); 10047 10048 /* 10049 * In case of a parity in a global block, let 10050 * the first leader that performs a 10051 * leader_reset() reset the global blocks in 10052 * order to clear global attentions. Otherwise 10053 * the gates will remain closed for that 10054 * engine. 10055 */ 10056 if (load_status || 10057 (global && other_load_status)) { 10058 /* Wait until all other functions get 10059 * down. 10060 */ 10061 schedule_delayed_work(&bp->sp_rtnl_task, 10062 HZ/10); 10063 return; 10064 } else { 10065 /* If all other functions got down - 10066 * try to bring the chip back to 10067 * normal. In any case it's an exit 10068 * point for a leader. 10069 */ 10070 if (bnx2x_leader_reset(bp)) { 10071 bnx2x_recovery_failed(bp); 10072 return; 10073 } 10074 10075 /* If we are here, means that the 10076 * leader has succeeded and doesn't 10077 * want to be a leader any more. Try 10078 * to continue as a none-leader. 10079 */ 10080 break; 10081 } 10082 } else { /* non-leader */ 10083 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { 10084 /* Try to get a LEADER_LOCK HW lock as 10085 * long as a former leader may have 10086 * been unloaded by the user or 10087 * released a leadership by another 10088 * reason. 10089 */ 10090 if (bnx2x_trylock_leader_lock(bp)) { 10091 /* I'm a leader now! Restart a 10092 * switch case. 10093 */ 10094 bp->is_leader = 1; 10095 break; 10096 } 10097 10098 schedule_delayed_work(&bp->sp_rtnl_task, 10099 HZ/10); 10100 return; 10101 10102 } else { 10103 /* 10104 * If there was a global attention, wait 10105 * for it to be cleared. 10106 */ 10107 if (bnx2x_reset_is_global(bp)) { 10108 schedule_delayed_work( 10109 &bp->sp_rtnl_task, 10110 HZ/10); 10111 return; 10112 } 10113 10114 error_recovered = 10115 bp->eth_stats.recoverable_error; 10116 error_unrecovered = 10117 bp->eth_stats.unrecoverable_error; 10118 bp->recovery_state = 10119 BNX2X_RECOVERY_NIC_LOADING; 10120 if (bnx2x_nic_load(bp, LOAD_NORMAL)) { 10121 error_unrecovered++; 10122 netdev_err(bp->dev, 10123 "Recovery failed. 
Power cycle needed\n"); 10124 /* Disconnect this device */ 10125 netif_device_detach(bp->dev); 10126 /* Shut down the power */ 10127 bnx2x_set_power_state( 10128 bp, PCI_D3hot); 10129 smp_mb(); 10130 } else { 10131 bp->recovery_state = 10132 BNX2X_RECOVERY_DONE; 10133 error_recovered++; 10134 smp_mb(); 10135 } 10136 bp->eth_stats.recoverable_error = 10137 error_recovered; 10138 bp->eth_stats.unrecoverable_error = 10139 error_unrecovered; 10140 10141 return; 10142 } 10143 } 10144 default: 10145 return; 10146 } 10147 } 10148 } 10149 10150 static int bnx2x_udp_port_update(struct bnx2x *bp) 10151 { 10152 struct bnx2x_func_switch_update_params *switch_update_params; 10153 struct bnx2x_func_state_params func_params = {NULL}; 10154 u16 vxlan_port = 0, geneve_port = 0; 10155 int rc; 10156 10157 switch_update_params = &func_params.params.switch_update; 10158 10159 /* Prepare parameters for function state transitions */ 10160 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 10161 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 10162 10163 func_params.f_obj = &bp->func_obj; 10164 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 10165 10166 /* Function parameters */ 10167 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, 10168 &switch_update_params->changes); 10169 10170 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) { 10171 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; 10172 switch_update_params->geneve_dst_port = geneve_port; 10173 } 10174 10175 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) { 10176 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; 10177 switch_update_params->vxlan_dst_port = vxlan_port; 10178 } 10179 10180 /* Re-enable inner-rss for the offloaded UDP tunnels */ 10181 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, 10182 &switch_update_params->changes); 10183 10184 rc = bnx2x_func_state_change(bp, &func_params); 10185 if (rc) 10186 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n", 10187 vxlan_port, geneve_port, rc); 10188 else 10189 DP(BNX2X_MSG_SP, 10190 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n", 10191 vxlan_port, geneve_port); 10192 10193 return rc; 10194 } 10195 10196 static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table) 10197 { 10198 struct bnx2x *bp = netdev_priv(netdev); 10199 struct udp_tunnel_info ti; 10200 10201 udp_tunnel_nic_get_port(netdev, table, 0, &ti); 10202 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port); 10203 10204 return bnx2x_udp_port_update(bp); 10205 } 10206 10207 static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = { 10208 .sync_table = bnx2x_udp_tunnel_sync, 10209 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 10210 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 10211 .tables = { 10212 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 10213 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 10214 }, 10215 }; 10216 10217 static int bnx2x_close(struct net_device *dev); 10218 10219 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is 10220 * scheduled on a general queue in order to prevent a dead lock. 
10221 */ 10222 static void bnx2x_sp_rtnl_task(struct work_struct *work) 10223 { 10224 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); 10225 10226 rtnl_lock(); 10227 10228 if (!netif_running(bp->dev)) { 10229 rtnl_unlock(); 10230 return; 10231 } 10232 10233 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { 10234 #ifdef BNX2X_STOP_ON_ERROR 10235 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 10236 "you will need to reboot when done\n"); 10237 goto sp_rtnl_not_reset; 10238 #endif 10239 /* 10240 * Clear all pending SP commands as we are going to reset the 10241 * function anyway. 10242 */ 10243 bp->sp_rtnl_state = 0; 10244 smp_mb(); 10245 10246 bnx2x_parity_recover(bp); 10247 10248 rtnl_unlock(); 10249 return; 10250 } 10251 10252 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { 10253 #ifdef BNX2X_STOP_ON_ERROR 10254 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 10255 "you will need to reboot when done\n"); 10256 goto sp_rtnl_not_reset; 10257 #endif 10258 10259 /* 10260 * Clear all pending SP commands as we are going to reset the 10261 * function anyway. 10262 */ 10263 bp->sp_rtnl_state = 0; 10264 smp_mb(); 10265 10266 /* Immediately indicate link as down */ 10267 bp->link_vars.link_up = 0; 10268 bp->force_link_down = true; 10269 netif_carrier_off(bp->dev); 10270 BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); 10271 10272 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 10273 /* If the load fails with an allocation error, 10274 * the nic is rebooted (unloaded and loaded again). If open still 10275 * fails, an error message notifies the user. 10276 */ 10277 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) { 10278 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 10279 if (bnx2x_nic_load(bp, LOAD_NORMAL)) 10280 BNX2X_ERR("Opening the NIC failed again!\n"); 10281 } 10282 rtnl_unlock(); 10283 return; 10284 } 10285 #ifdef BNX2X_STOP_ON_ERROR 10286 sp_rtnl_not_reset: 10287 #endif 10288 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 10289 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); 10290 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) 10291 bnx2x_after_function_update(bp); 10292 /* 10293 * in case of fan failure we need to reset it if the "stop on error" 10294 * debug flag is set, since we are trying to prevent permanent overheating 10295 * damage 10296 */ 10297 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { 10298 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); 10299 netif_device_detach(bp->dev); 10300 bnx2x_close(bp->dev); 10301 rtnl_unlock(); 10302 return; 10303 } 10304 10305 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { 10306 DP(BNX2X_MSG_SP, 10307 "sending set mcast vf pf channel message from rtnl sp-task\n"); 10308 bnx2x_vfpf_set_mcast(bp->dev); 10309 } 10310 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 10311 &bp->sp_rtnl_state)) { 10312 if (netif_carrier_ok(bp->dev)) { 10313 bnx2x_tx_disable(bp); 10314 BNX2X_ERR("PF indicated channel is not serviceable anymore.
This means this VF device is no longer operational\n"); 10315 } 10316 } 10317 10318 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { 10319 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); 10320 bnx2x_set_rx_mode_inner(bp); 10321 } 10322 10323 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, 10324 &bp->sp_rtnl_state)) 10325 bnx2x_pf_set_vfs_vlan(bp); 10326 10327 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { 10328 bnx2x_dcbx_stop_hw_tx(bp); 10329 bnx2x_dcbx_resume_hw_tx(bp); 10330 } 10331 10332 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, 10333 &bp->sp_rtnl_state)) 10334 bnx2x_update_mng_version(bp); 10335 10336 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) 10337 bnx2x_handle_update_svid_cmd(bp); 10338 10339 /* work which needs rtnl lock not-taken (as it takes the lock itself and 10340 * can be called from other contexts as well) 10341 */ 10342 rtnl_unlock(); 10343 10344 /* enable SR-IOV if applicable */ 10345 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, 10346 &bp->sp_rtnl_state)) { 10347 bnx2x_disable_sriov(bp); 10348 bnx2x_enable_sriov(bp); 10349 } 10350 } 10351 10352 static void bnx2x_period_task(struct work_struct *work) 10353 { 10354 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); 10355 10356 if (!netif_running(bp->dev)) 10357 goto period_task_exit; 10358 10359 if (CHIP_REV_IS_SLOW(bp)) { 10360 BNX2X_ERR("period task called on emulation, ignoring\n"); 10361 goto period_task_exit; 10362 } 10363 10364 bnx2x_acquire_phy_lock(bp); 10365 /* 10366 * The barrier is needed to ensure the ordering between the writing to 10367 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 10368 * the reading here. 10369 */ 10370 smp_mb(); 10371 if (bp->port.pmf) { 10372 bnx2x_period_func(&bp->link_params, &bp->link_vars); 10373 10374 /* Re-queue task in 1 sec */ 10375 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); 10376 } 10377 10378 bnx2x_release_phy_lock(bp); 10379 period_task_exit: 10380 return; 10381 } 10382 10383 /* 10384 * Init service functions 10385 */ 10386 10387 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) 10388 { 10389 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; 10390 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; 10391 return base + (BP_ABS_FUNC(bp)) * stride; 10392 } 10393 10394 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, 10395 u8 port, u32 reset_reg, 10396 struct bnx2x_mac_vals *vals) 10397 { 10398 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 10399 u32 base_addr; 10400 10401 if (!(mask & reset_reg)) 10402 return false; 10403 10404 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port); 10405 base_addr = port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; 10406 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; 10407 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); 10408 REG_WR(bp, vals->umac_addr[port], 0); 10409 10410 return true; 10411 } 10412 10413 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, 10414 struct bnx2x_mac_vals *vals) 10415 { 10416 u32 val, base_addr, offset, mask, reset_reg; 10417 bool mac_stopped = false; 10418 u8 port = BP_PORT(bp); 10419 10420 /* reset addresses as they also mark which values were changed */ 10421 memset(vals, 0, sizeof(*vals)); 10422 10423 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 10424 10425 if (!CHIP_IS_E3(bp)) { 10426 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 10427 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 10428 if ((mask & reset_reg) && val) { 10429 u32 wb_data[2]; 10430 BNX2X_DEV_INFO("Disable bmac Rx\n"); 10431 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM 10432 : NIG_REG_INGRESS_BMAC0_MEM; 10433 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL 10434 : BIGMAC_REGISTER_BMAC_CONTROL; 10435 10436 /* 10437 * use rd/wr since we cannot use dmae. This is safe 10438 * since MCP won't access the bus due to the request 10439 * to unload, and no function on the path can be 10440 * loaded at this time. 10441 */ 10442 wb_data[0] = REG_RD(bp, base_addr + offset); 10443 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); 10444 vals->bmac_addr = base_addr + offset; 10445 vals->bmac_val[0] = wb_data[0]; 10446 vals->bmac_val[1] = wb_data[1]; 10447 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 10448 REG_WR(bp, vals->bmac_addr, wb_data[0]); 10449 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); 10450 } 10451 BNX2X_DEV_INFO("Disable emac Rx\n"); 10452 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; 10453 vals->emac_val = REG_RD(bp, vals->emac_addr); 10454 REG_WR(bp, vals->emac_addr, 0); 10455 mac_stopped = true; 10456 } else { 10457 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 10458 BNX2X_DEV_INFO("Disable xmac Rx\n"); 10459 base_addr = BP_PORT(bp) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; 10460 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); 10461 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 10462 val & ~(1 << 1)); 10463 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 10464 val | (1 << 1)); 10465 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 10466 vals->xmac_val = REG_RD(bp, vals->xmac_addr); 10467 REG_WR(bp, vals->xmac_addr, 0); 10468 mac_stopped = true; 10469 } 10470 10471 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, 10472 reset_reg, vals); 10473 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, 10474 reset_reg, vals); 10475 } 10476 10477 if (mac_stopped) 10478 msleep(20); 10479 } 10480 10481 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 10482 #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \ 10483 0x1848 + ((f) << 4)) 10484 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 10485 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 10486 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 10487 10488 #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10489 #define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10490 #define BCM_5710_UNDI_FW_MF_VERS (0x05) 10491 10492 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) 10493 { 10494 /* UNDI marks its presence in DORQ - 10495 * it initializes CID offset for normal bell to 0x7 10496 */ 10497 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & 10498 MISC_REGISTERS_RESET_REG_1_RST_DORQ)) 10499 return false; 10500 10501 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { 10502 BNX2X_DEV_INFO("UNDI previously loaded\n"); 10503 return true; 10504 } 10505 10506 return false; 10507 } 10508 10509 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) 10510 { 10511 u16 rcq, bd; 10512 u32 addr, tmp_reg; 10513 10514 if (BP_FUNC(bp) < 2) 10515 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); 10516 else 10517 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); 10518 10519 tmp_reg = REG_RD(bp, addr); 10520 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 10521 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 10522 10523 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 10524 REG_WR(bp, addr, tmp_reg); 10525 10526 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", 10527 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); 10528 } 10529 10530 static int bnx2x_prev_mcp_done(struct bnx2x *bp) 10531 { 10532 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 10533 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 10534 if (!rc) { 10535 BNX2X_ERR("MCP response failure, aborting\n"); 10536 return -EBUSY; 10537 } 10538 10539 return 0; 10540 } 10541 10542 static struct bnx2x_prev_path_list * 10543 bnx2x_prev_path_get_entry(struct bnx2x *bp) 10544 { 10545 struct bnx2x_prev_path_list *tmp_list; 10546 10547 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) 10548 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 10549 bp->pdev->bus->number == tmp_list->bus && 10550 BP_PATH(bp) == tmp_list->path) 10551 return tmp_list; 10552 10553 return NULL; 10554 } 10555 10556 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) 10557 { 10558 struct bnx2x_prev_path_list *tmp_list; 10559 int rc; 10560 10561 rc = down_interruptible(&bnx2x_prev_sem); 10562 if (rc) { 10563 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10564 return rc; 10565 } 10566 10567 tmp_list = bnx2x_prev_path_get_entry(bp); 10568 if (tmp_list) { 10569 tmp_list->aer = 1; 10570 rc = 0; 10571 } else { 10572 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", 10573 BP_PATH(bp)); 10574 } 
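/* Note (editorial, based on the code above): if no path entry was found, only the
 * error is logged; rc still holds the 0 returned by down_interruptible(), so the
 * function reports success either way.
 */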
10575 10576 up(&bnx2x_prev_sem); 10577 10578 return rc; 10579 } 10580 10581 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) 10582 { 10583 struct bnx2x_prev_path_list *tmp_list; 10584 bool rc = false; 10585 10586 if (down_trylock(&bnx2x_prev_sem)) 10587 return false; 10588 10589 tmp_list = bnx2x_prev_path_get_entry(bp); 10590 if (tmp_list) { 10591 if (tmp_list->aer) { 10592 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", 10593 BP_PATH(bp)); 10594 } else { 10595 rc = true; 10596 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 10597 BP_PATH(bp)); 10598 } 10599 } 10600 10601 up(&bnx2x_prev_sem); 10602 10603 return rc; 10604 } 10605 10606 bool bnx2x_port_after_undi(struct bnx2x *bp) 10607 { 10608 struct bnx2x_prev_path_list *entry; 10609 bool val; 10610 10611 down(&bnx2x_prev_sem); 10612 10613 entry = bnx2x_prev_path_get_entry(bp); 10614 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); 10615 10616 up(&bnx2x_prev_sem); 10617 10618 return val; 10619 } 10620 10621 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) 10622 { 10623 struct bnx2x_prev_path_list *tmp_list; 10624 int rc; 10625 10626 rc = down_interruptible(&bnx2x_prev_sem); 10627 if (rc) { 10628 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10629 return rc; 10630 } 10631 10632 /* Check whether the entry for this path already exists */ 10633 tmp_list = bnx2x_prev_path_get_entry(bp); 10634 if (tmp_list) { 10635 if (!tmp_list->aer) { 10636 BNX2X_ERR("Re-Marking the path.\n"); 10637 } else { 10638 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", 10639 BP_PATH(bp)); 10640 tmp_list->aer = 0; 10641 } 10642 up(&bnx2x_prev_sem); 10643 return 0; 10644 } 10645 up(&bnx2x_prev_sem); 10646 10647 /* Create an entry for this path and add it */ 10648 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 10649 if (!tmp_list) { 10650 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); 10651 return -ENOMEM; 10652 } 10653 10654 tmp_list->bus = bp->pdev->bus->number; 10655 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 10656 tmp_list->path = BP_PATH(bp); 10657 tmp_list->aer = 0; 10658 tmp_list->undi = after_undi ? 
(1 << BP_PORT(bp)) : 0; 10659 10660 rc = down_interruptible(&bnx2x_prev_sem); 10661 if (rc) { 10662 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10663 kfree(tmp_list); 10664 } else { 10665 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", 10666 BP_PATH(bp)); 10667 list_add(&tmp_list->list, &bnx2x_prev_list); 10668 up(&bnx2x_prev_sem); 10669 } 10670 10671 return rc; 10672 } 10673 10674 static int bnx2x_do_flr(struct bnx2x *bp) 10675 { 10676 struct pci_dev *dev = bp->pdev; 10677 10678 if (CHIP_IS_E1x(bp)) { 10679 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); 10680 return -EINVAL; 10681 } 10682 10683 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 10684 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 10685 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", 10686 bp->common.bc_ver); 10687 return -EINVAL; 10688 } 10689 10690 if (!pci_wait_for_pending_transaction(dev)) 10691 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); 10692 10693 BNX2X_DEV_INFO("Initiating FLR\n"); 10694 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 10695 10696 return 0; 10697 } 10698 10699 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) 10700 { 10701 int rc; 10702 10703 BNX2X_DEV_INFO("Uncommon unload Flow\n"); 10704 10705 /* Test if previous unload process was already finished for this path */ 10706 if (bnx2x_prev_is_path_marked(bp)) 10707 return bnx2x_prev_mcp_done(bp); 10708 10709 BNX2X_DEV_INFO("Path is unmarked\n"); 10710 10711 /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */ 10712 if (bnx2x_prev_is_after_undi(bp)) 10713 goto out; 10714 10715 /* If function has FLR capabilities, and existing FW version matches 10716 * the one required, then FLR will be sufficient to clean any residue 10717 * left by previous driver 10718 */ 10719 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false); 10720 10721 if (!rc) { 10722 /* fw version is good */ 10723 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n"); 10724 rc = bnx2x_do_flr(bp); 10725 } 10726 10727 if (!rc) { 10728 /* FLR was performed */ 10729 BNX2X_DEV_INFO("FLR successful\n"); 10730 return 0; 10731 } 10732 10733 BNX2X_DEV_INFO("Could not FLR\n"); 10734 10735 out: 10736 /* Close the MCP request, return failure*/ 10737 rc = bnx2x_prev_mcp_done(bp); 10738 if (!rc) 10739 rc = BNX2X_PREV_WAIT_NEEDED; 10740 10741 return rc; 10742 } 10743 10744 static int bnx2x_prev_unload_common(struct bnx2x *bp) 10745 { 10746 u32 reset_reg, tmp_reg = 0, rc; 10747 bool prev_undi = false; 10748 struct bnx2x_mac_vals mac_vals; 10749 10750 /* It is possible a previous function received 'common' answer, 10751 * but hasn't loaded yet, therefore creating a scenario of 10752 * multiple functions receiving 'common' on the same path. 
10753 */ 10754 BNX2X_DEV_INFO("Common unload Flow\n"); 10755 10756 memset(&mac_vals, 0, sizeof(mac_vals)); 10757 10758 if (bnx2x_prev_is_path_marked(bp)) 10759 return bnx2x_prev_mcp_done(bp); 10760 10761 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); 10762 10763 /* Reset should be performed after BRB is emptied */ 10764 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10765 u32 timer_count = 1000; 10766 10767 /* Close the MAC Rx to prevent BRB from filling up */ 10768 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10769 10770 /* close LLH filters for both ports towards the BRB */ 10771 bnx2x_set_rx_filter(&bp->link_params, 0); 10772 bp->link_params.port ^= 1; 10773 bnx2x_set_rx_filter(&bp->link_params, 0); 10774 bp->link_params.port ^= 1; 10775 10776 /* Check if the UNDI driver was previously loaded */ 10777 if (bnx2x_prev_is_after_undi(bp)) { 10778 prev_undi = true; 10779 /* clear the UNDI indication */ 10780 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); 10781 /* clear possible idle check errors */ 10782 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); 10783 } 10784 if (!CHIP_IS_E1x(bp)) 10785 /* block FW from writing to host */ 10786 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 10787 10788 /* wait until BRB is empty */ 10789 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 10790 while (timer_count) { 10791 u32 prev_brb = tmp_reg; 10792 10793 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 10794 if (!tmp_reg) 10795 break; 10796 10797 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); 10798 10799 /* reset timer as long as BRB actually gets emptied */ 10800 if (prev_brb > tmp_reg) 10801 timer_count = 1000; 10802 else 10803 timer_count--; 10804 10805 /* If UNDI resides in memory, manually increment it */ 10806 if (prev_undi) 10807 bnx2x_prev_unload_undi_inc(bp, 1); 10808 10809 udelay(10); 10810 } 10811 10812 if (!timer_count) 10813 BNX2X_ERR("Failed to empty BRB, hope for the best\n"); 10814 } 10815 10816 /* No packets are in the pipeline, path is ready for reset */ 10817 bnx2x_reset_common(bp); 10818 10819 if (mac_vals.xmac_addr) 10820 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); 10821 if (mac_vals.umac_addr[0]) 10822 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); 10823 if (mac_vals.umac_addr[1]) 10824 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); 10825 if (mac_vals.emac_addr) 10826 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); 10827 if (mac_vals.bmac_addr) { 10828 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 10829 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 10830 } 10831 10832 rc = bnx2x_prev_mark_path(bp, prev_undi); 10833 if (rc) { 10834 bnx2x_prev_mcp_done(bp); 10835 return rc; 10836 } 10837 10838 return bnx2x_prev_mcp_done(bp); 10839 } 10840 10841 static int bnx2x_prev_unload(struct bnx2x *bp) 10842 { 10843 int time_counter = 10; 10844 u32 rc, fw, hw_lock_reg, hw_lock_val; 10845 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 10846 10847 /* clear hw from errors which may have resulted from an interrupted 10848 * dmae transaction. 10849 */ 10850 bnx2x_clean_pglue_errors(bp); 10851 10852 /* Release previously held locks */ 10853 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 
10854 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 10855 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 10856 10857 hw_lock_val = REG_RD(bp, hw_lock_reg); 10858 if (hw_lock_val) { 10859 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 10860 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); 10861 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 10862 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); 10863 } 10864 10865 BNX2X_DEV_INFO("Release Previously held hw lock\n"); 10866 REG_WR(bp, hw_lock_reg, 0xffffffff); 10867 } else 10868 BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); 10869 10870 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { 10871 BNX2X_DEV_INFO("Release previously held alr\n"); 10872 bnx2x_release_alr(bp); 10873 } 10874 10875 do { 10876 int aer = 0; 10877 /* Lock MCP using an unload request */ 10878 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 10879 if (!fw) { 10880 BNX2X_ERR("MCP response failure, aborting\n"); 10881 rc = -EBUSY; 10882 break; 10883 } 10884 10885 rc = down_interruptible(&bnx2x_prev_sem); 10886 if (rc) { 10887 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", 10888 rc); 10889 } else { 10890 /* If Path is marked by EEH, ignore unload status */ 10891 aer = !!(bnx2x_prev_path_get_entry(bp) && 10892 bnx2x_prev_path_get_entry(bp)->aer); 10893 up(&bnx2x_prev_sem); 10894 } 10895 10896 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { 10897 rc = bnx2x_prev_unload_common(bp); 10898 break; 10899 } 10900 10901 /* non-common reply from MCP might require looping */ 10902 rc = bnx2x_prev_unload_uncommon(bp); 10903 if (rc != BNX2X_PREV_WAIT_NEEDED) 10904 break; 10905 10906 msleep(20); 10907 } while (--time_counter); 10908 10909 if (!time_counter || rc) { 10910 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n"); 10911 rc = -EPROBE_DEFER; 10912 } 10913 10914 /* Mark function if its port was used to boot from SAN */ 10915 if (bnx2x_port_after_undi(bp)) 10916 bp->link_params.feature_config_flags |= 10917 FEATURE_CONFIG_BOOT_FROM_SAN; 10918 10919 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 10920 10921 return rc; 10922 } 10923 10924 static void bnx2x_get_common_hwinfo(struct bnx2x *bp) 10925 { 10926 u32 val, val2, val3, val4, id, boot_mode; 10927 u16 pmc; 10928 10929 /* Get the chip revision id and number. 
*/ 10930 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 10931 val = REG_RD(bp, MISC_REG_CHIP_NUM); 10932 id = ((val & 0xffff) << 16); 10933 val = REG_RD(bp, MISC_REG_CHIP_REV); 10934 id |= ((val & 0xf) << 12); 10935 10936 /* Metal is read from PCI regs, but we can't access >=0x400 from 10937 * the configuration space (so we need to reg_rd) 10938 */ 10939 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); 10940 id |= (((val >> 24) & 0xf) << 4); 10941 val = REG_RD(bp, MISC_REG_BOND_ID); 10942 id |= (val & 0xf); 10943 bp->common.chip_id = id; 10944 10945 /* force 57811 according to MISC register */ 10946 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 10947 if (CHIP_IS_57810(bp)) 10948 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 10949 (bp->common.chip_id & 0x0000FFFF); 10950 else if (CHIP_IS_57810_MF(bp)) 10951 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 10952 (bp->common.chip_id & 0x0000FFFF); 10953 bp->common.chip_id |= 0x1; 10954 } 10955 10956 /* Set doorbell size */ 10957 bp->db_size = (1 << BNX2X_DB_SHIFT); 10958 10959 if (!CHIP_IS_E1x(bp)) { 10960 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 10961 if ((val & 1) == 0) 10962 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 10963 else 10964 val = (val >> 1) & 1; 10965 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 10966 "2_PORT_MODE"); 10967 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : 10968 CHIP_2_PORT_MODE; 10969 10970 if (CHIP_MODE_IS_4_PORT(bp)) 10971 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 10972 else 10973 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 10974 } else { 10975 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 10976 bp->pfid = bp->pf_num; /* 0..7 */ 10977 } 10978 10979 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); 10980 10981 bp->link_params.chip_id = bp->common.chip_id; 10982 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 10983 10984 val = (REG_RD(bp, 0x2874) & 0x55); 10985 if ((bp->common.chip_id & 0x1) || 10986 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 10987 bp->flags |= ONE_PORT_FLAG; 10988 BNX2X_DEV_INFO("single port device\n"); 10989 } 10990 10991 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 10992 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 10993 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 10994 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 10995 bp->common.flash_size, bp->common.flash_size); 10996 10997 bnx2x_init_shmem(bp); 10998 10999 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
11000 MISC_REG_GENERIC_CR_1 : 11001 MISC_REG_GENERIC_CR_0)); 11002 11003 bp->link_params.shmem_base = bp->common.shmem_base; 11004 bp->link_params.shmem2_base = bp->common.shmem2_base; 11005 if (SHMEM2_RD(bp, size) > 11006 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 11007 bp->link_params.lfa_base = 11008 REG_RD(bp, bp->common.shmem2_base + 11009 (u32)offsetof(struct shmem2_region, 11010 lfa_host_addr[BP_PORT(bp)])); 11011 else 11012 bp->link_params.lfa_base = 0; 11013 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 11014 bp->common.shmem_base, bp->common.shmem2_base); 11015 11016 if (!bp->common.shmem_base) { 11017 BNX2X_DEV_INFO("MCP not active\n"); 11018 bp->flags |= NO_MCP_FLAG; 11019 return; 11020 } 11021 11022 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 11023 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 11024 11025 bp->link_params.hw_led_mode = ((bp->common.hw_config & 11026 SHARED_HW_CFG_LED_MODE_MASK) >> 11027 SHARED_HW_CFG_LED_MODE_SHIFT); 11028 11029 bp->link_params.feature_config_flags = 0; 11030 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 11031 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 11032 bp->link_params.feature_config_flags |= 11033 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 11034 else 11035 bp->link_params.feature_config_flags &= 11036 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 11037 11038 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 11039 bp->common.bc_ver = val; 11040 BNX2X_DEV_INFO("bc_ver %X\n", val); 11041 if (val < BNX2X_BC_VER) { 11042 /* for now only warn 11043 * later we might need to enforce this */ 11044 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 11045 BNX2X_BC_VER, val); 11046 } 11047 bp->link_params.feature_config_flags |= 11048 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 11049 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 11050 11051 bp->link_params.feature_config_flags |= 11052 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 11053 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 11054 bp->link_params.feature_config_flags |= 11055 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 11056 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 11057 bp->link_params.feature_config_flags |= 11058 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 11059 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 11060 11061 bp->link_params.feature_config_flags |= 11062 (val >= REQ_BC_VER_4_MT_SUPPORTED) ? 11063 FEATURE_CONFIG_MT_SUPPORT : 0; 11064 11065 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 11066 BC_SUPPORTS_PFC_STATS : 0; 11067 11068 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? 11069 BC_SUPPORTS_FCOE_FEATURES : 0; 11070 11071 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 11072 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; 11073 11074 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 
11075 BC_SUPPORTS_RMMOD_CMD : 0; 11076 11077 boot_mode = SHMEM_RD(bp, 11078 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 11079 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 11080 switch (boot_mode) { 11081 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: 11082 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; 11083 break; 11084 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: 11085 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; 11086 break; 11087 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: 11088 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; 11089 break; 11090 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: 11091 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; 11092 break; 11093 } 11094 11095 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); 11096 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 11097 11098 BNX2X_DEV_INFO("%sWoL capable\n", 11099 (bp->flags & NO_WOL_FLAG) ? "not " : ""); 11100 11101 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 11102 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 11103 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 11104 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 11105 11106 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", 11107 val, val2, val3, val4); 11108 } 11109 11110 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 11111 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 11112 11113 static int bnx2x_get_igu_cam_info(struct bnx2x *bp) 11114 { 11115 int pfid = BP_FUNC(bp); 11116 int igu_sb_id; 11117 u32 val; 11118 u8 fid, igu_sb_cnt = 0; 11119 11120 bp->igu_base_sb = 0xff; 11121 if (CHIP_INT_MODE_IS_BC(bp)) { 11122 int vn = BP_VN(bp); 11123 igu_sb_cnt = bp->igu_sb_cnt; 11124 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 11125 FP_SB_MAX_E1x; 11126 11127 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + 11128 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); 11129 11130 return 0; 11131 } 11132 11133 /* IGU in normal mode - read CAM */ 11134 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 11135 igu_sb_id++) { 11136 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 11137 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 11138 continue; 11139 fid = IGU_FID(val); 11140 if ((fid & IGU_FID_ENCODE_IS_PF)) { 11141 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) 11142 continue; 11143 if (IGU_VEC(val) == 0) 11144 /* default status block */ 11145 bp->igu_dsb_id = igu_sb_id; 11146 else { 11147 if (bp->igu_base_sb == 0xff) 11148 bp->igu_base_sb = igu_sb_id; 11149 igu_sb_cnt++; 11150 } 11151 } 11152 } 11153 11154 #ifdef CONFIG_PCI_MSI 11155 /* Due to new PF resource allocation by MFW T7.4 and above, it's 11156 * optional that number of CAM entries will not be equal to the value 11157 * advertised in PCI. 
11158 * Driver should use the minimal value of both as the actual status 11159 * block count 11160 */ 11161 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); 11162 #endif 11163 11164 if (igu_sb_cnt == 0) { 11165 BNX2X_ERR("CAM configuration error\n"); 11166 return -EINVAL; 11167 } 11168 11169 return 0; 11170 } 11171 11172 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) 11173 { 11174 int cfg_size = 0, idx, port = BP_PORT(bp); 11175 11176 /* Aggregation of supported attributes of all external phys */ 11177 bp->port.supported[0] = 0; 11178 bp->port.supported[1] = 0; 11179 switch (bp->link_params.num_phys) { 11180 case 1: 11181 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; 11182 cfg_size = 1; 11183 break; 11184 case 2: 11185 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; 11186 cfg_size = 1; 11187 break; 11188 case 3: 11189 if (bp->link_params.multi_phy_config & 11190 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 11191 bp->port.supported[1] = 11192 bp->link_params.phy[EXT_PHY1].supported; 11193 bp->port.supported[0] = 11194 bp->link_params.phy[EXT_PHY2].supported; 11195 } else { 11196 bp->port.supported[0] = 11197 bp->link_params.phy[EXT_PHY1].supported; 11198 bp->port.supported[1] = 11199 bp->link_params.phy[EXT_PHY2].supported; 11200 } 11201 cfg_size = 2; 11202 break; 11203 } 11204 11205 if (!(bp->port.supported[0] || bp->port.supported[1])) { 11206 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", 11207 SHMEM_RD(bp, 11208 dev_info.port_hw_config[port].external_phy_config), 11209 SHMEM_RD(bp, 11210 dev_info.port_hw_config[port].external_phy_config2)); 11211 return; 11212 } 11213 11214 if (CHIP_IS_E3(bp)) 11215 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 11216 else { 11217 switch (switch_cfg) { 11218 case SWITCH_CFG_1G: 11219 bp->port.phy_addr = REG_RD( 11220 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 11221 break; 11222 case SWITCH_CFG_10G: 11223 bp->port.phy_addr = REG_RD( 11224 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 11225 break; 11226 default: 11227 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 11228 bp->port.link_config[0]); 11229 return; 11230 } 11231 } 11232 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 11233 /* mask what we support according to speed_cap_mask per configuration */ 11234 for (idx = 0; idx < cfg_size; idx++) { 11235 if (!(bp->link_params.speed_cap_mask[idx] & 11236 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 11237 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 11238 11239 if (!(bp->link_params.speed_cap_mask[idx] & 11240 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 11241 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 11242 11243 if (!(bp->link_params.speed_cap_mask[idx] & 11244 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 11245 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 11246 11247 if (!(bp->link_params.speed_cap_mask[idx] & 11248 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 11249 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 11250 11251 if (!(bp->link_params.speed_cap_mask[idx] & 11252 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 11253 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 11254 SUPPORTED_1000baseT_Full); 11255 11256 if (!(bp->link_params.speed_cap_mask[idx] & 11257 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 11258 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 11259 11260 if (!(bp->link_params.speed_cap_mask[idx] & 11261 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 11262 bp->port.supported[idx] &= 
~SUPPORTED_10000baseT_Full; 11263 11264 if (!(bp->link_params.speed_cap_mask[idx] & 11265 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 11266 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; 11267 } 11268 11269 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 11270 bp->port.supported[1]); 11271 } 11272 11273 static void bnx2x_link_settings_requested(struct bnx2x *bp) 11274 { 11275 u32 link_config, idx, cfg_size = 0; 11276 bp->port.advertising[0] = 0; 11277 bp->port.advertising[1] = 0; 11278 switch (bp->link_params.num_phys) { 11279 case 1: 11280 case 2: 11281 cfg_size = 1; 11282 break; 11283 case 3: 11284 cfg_size = 2; 11285 break; 11286 } 11287 for (idx = 0; idx < cfg_size; idx++) { 11288 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 11289 link_config = bp->port.link_config[idx]; 11290 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 11291 case PORT_FEATURE_LINK_SPEED_AUTO: 11292 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 11293 bp->link_params.req_line_speed[idx] = 11294 SPEED_AUTO_NEG; 11295 bp->port.advertising[idx] |= 11296 bp->port.supported[idx]; 11297 if (bp->link_params.phy[EXT_PHY1].type == 11298 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 11299 bp->port.advertising[idx] |= 11300 (SUPPORTED_100baseT_Half | 11301 SUPPORTED_100baseT_Full); 11302 } else { 11303 /* force 10G, no AN */ 11304 bp->link_params.req_line_speed[idx] = 11305 SPEED_10000; 11306 bp->port.advertising[idx] |= 11307 (ADVERTISED_10000baseT_Full | 11308 ADVERTISED_FIBRE); 11309 continue; 11310 } 11311 break; 11312 11313 case PORT_FEATURE_LINK_SPEED_10M_FULL: 11314 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 11315 bp->link_params.req_line_speed[idx] = 11316 SPEED_10; 11317 bp->port.advertising[idx] |= 11318 (ADVERTISED_10baseT_Full | 11319 ADVERTISED_TP); 11320 } else { 11321 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11322 link_config, 11323 bp->link_params.speed_cap_mask[idx]); 11324 return; 11325 } 11326 break; 11327 11328 case PORT_FEATURE_LINK_SPEED_10M_HALF: 11329 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 11330 bp->link_params.req_line_speed[idx] = 11331 SPEED_10; 11332 bp->link_params.req_duplex[idx] = 11333 DUPLEX_HALF; 11334 bp->port.advertising[idx] |= 11335 (ADVERTISED_10baseT_Half | 11336 ADVERTISED_TP); 11337 } else { 11338 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11339 link_config, 11340 bp->link_params.speed_cap_mask[idx]); 11341 return; 11342 } 11343 break; 11344 11345 case PORT_FEATURE_LINK_SPEED_100M_FULL: 11346 if (bp->port.supported[idx] & 11347 SUPPORTED_100baseT_Full) { 11348 bp->link_params.req_line_speed[idx] = 11349 SPEED_100; 11350 bp->port.advertising[idx] |= 11351 (ADVERTISED_100baseT_Full | 11352 ADVERTISED_TP); 11353 } else { 11354 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11355 link_config, 11356 bp->link_params.speed_cap_mask[idx]); 11357 return; 11358 } 11359 break; 11360 11361 case PORT_FEATURE_LINK_SPEED_100M_HALF: 11362 if (bp->port.supported[idx] & 11363 SUPPORTED_100baseT_Half) { 11364 bp->link_params.req_line_speed[idx] = 11365 SPEED_100; 11366 bp->link_params.req_duplex[idx] = 11367 DUPLEX_HALF; 11368 bp->port.advertising[idx] |= 11369 (ADVERTISED_100baseT_Half | 11370 ADVERTISED_TP); 11371 } else { 11372 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11373 link_config, 11374 bp->link_params.speed_cap_mask[idx]); 11375 return; 11376 } 11377 break; 11378 11379 case PORT_FEATURE_LINK_SPEED_1G: 11380 if (bp->port.supported[idx] & 11381 SUPPORTED_1000baseT_Full) { 11382 bp->link_params.req_line_speed[idx] = 11383 SPEED_1000; 11384 bp->port.advertising[idx] |= 11385 (ADVERTISED_1000baseT_Full | 11386 ADVERTISED_TP); 11387 } else if (bp->port.supported[idx] & 11388 SUPPORTED_1000baseKX_Full) { 11389 bp->link_params.req_line_speed[idx] = 11390 SPEED_1000; 11391 bp->port.advertising[idx] |= 11392 ADVERTISED_1000baseKX_Full; 11393 } else { 11394 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11395 link_config, 11396 bp->link_params.speed_cap_mask[idx]); 11397 return; 11398 } 11399 break; 11400 11401 case PORT_FEATURE_LINK_SPEED_2_5G: 11402 if (bp->port.supported[idx] & 11403 SUPPORTED_2500baseX_Full) { 11404 bp->link_params.req_line_speed[idx] = 11405 SPEED_2500; 11406 bp->port.advertising[idx] |= 11407 (ADVERTISED_2500baseX_Full | 11408 ADVERTISED_TP); 11409 } else { 11410 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11411 link_config, 11412 bp->link_params.speed_cap_mask[idx]); 11413 return; 11414 } 11415 break; 11416 11417 case PORT_FEATURE_LINK_SPEED_10G_CX4: 11418 if (bp->port.supported[idx] & 11419 SUPPORTED_10000baseT_Full) { 11420 bp->link_params.req_line_speed[idx] = 11421 SPEED_10000; 11422 bp->port.advertising[idx] |= 11423 (ADVERTISED_10000baseT_Full | 11424 ADVERTISED_FIBRE); 11425 } else if (bp->port.supported[idx] & 11426 SUPPORTED_10000baseKR_Full) { 11427 bp->link_params.req_line_speed[idx] = 11428 SPEED_10000; 11429 bp->port.advertising[idx] |= 11430 (ADVERTISED_10000baseKR_Full | 11431 ADVERTISED_FIBRE); 11432 } else { 11433 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11434 link_config, 11435 bp->link_params.speed_cap_mask[idx]); 11436 return; 11437 } 11438 break; 11439 case PORT_FEATURE_LINK_SPEED_20G: 11440 bp->link_params.req_line_speed[idx] = SPEED_20000; 11441 11442 break; 11443 default: 11444 BNX2X_ERR("NVRAM config error. 
BAD link speed link_config 0x%x\n", 11445 link_config); 11446 bp->link_params.req_line_speed[idx] = 11447 SPEED_AUTO_NEG; 11448 bp->port.advertising[idx] = 11449 bp->port.supported[idx]; 11450 break; 11451 } 11452 11453 bp->link_params.req_flow_ctrl[idx] = (link_config & 11454 PORT_FEATURE_FLOW_CONTROL_MASK); 11455 if (bp->link_params.req_flow_ctrl[idx] == 11456 BNX2X_FLOW_CTRL_AUTO) { 11457 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) 11458 bp->link_params.req_flow_ctrl[idx] = 11459 BNX2X_FLOW_CTRL_NONE; 11460 else 11461 bnx2x_set_requested_fc(bp); 11462 } 11463 11464 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 11465 bp->link_params.req_line_speed[idx], 11466 bp->link_params.req_duplex[idx], 11467 bp->link_params.req_flow_ctrl[idx], 11468 bp->port.advertising[idx]); 11469 } 11470 } 11471 11472 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 11473 { 11474 __be16 mac_hi_be = cpu_to_be16(mac_hi); 11475 __be32 mac_lo_be = cpu_to_be32(mac_lo); 11476 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); 11477 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); 11478 } 11479 11480 static void bnx2x_get_port_hwinfo(struct bnx2x *bp) 11481 { 11482 int port = BP_PORT(bp); 11483 u32 config; 11484 u32 ext_phy_type, ext_phy_config, eee_mode; 11485 11486 bp->link_params.bp = bp; 11487 bp->link_params.port = port; 11488 11489 bp->link_params.lane_config = 11490 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 11491 11492 bp->link_params.speed_cap_mask[0] = 11493 SHMEM_RD(bp, 11494 dev_info.port_hw_config[port].speed_capability_mask) & 11495 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 11496 bp->link_params.speed_cap_mask[1] = 11497 SHMEM_RD(bp, 11498 dev_info.port_hw_config[port].speed_capability_mask2) & 11499 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 11500 bp->port.link_config[0] = 11501 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 11502 11503 bp->port.link_config[1] = 11504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); 11505 11506 bp->link_params.multi_phy_config = 11507 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 11508 /* If the device is capable of WoL, set the default state according 11509 * to the HW 11510 */ 11511 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 11512 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 11513 (config & PORT_FEATURE_WOL_ENABLED)); 11514 11515 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 11516 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) 11517 bp->flags |= NO_ISCSI_FLAG; 11518 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 11519 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) 11520 bp->flags |= NO_FCOE_FLAG; 11521 11522 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 11523 bp->link_params.lane_config, 11524 bp->link_params.speed_cap_mask[0], 11525 bp->port.link_config[0]); 11526 11527 bp->link_params.switch_cfg = (bp->port.link_config[0] & 11528 PORT_FEATURE_CONNECTED_SWITCH_MASK); 11529 bnx2x_phy_probe(&bp->link_params); 11530 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 11531 11532 bnx2x_link_settings_requested(bp); 11533 11534 /* 11535 * If connected directly, work with the internal PHY, otherwise, work 11536 * with the external PHY 11537 */ 11538 ext_phy_config = 11539 SHMEM_RD(bp, 11540 dev_info.port_hw_config[port].external_phy_config); 11541 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 11542 if (ext_phy_type == 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 11543 bp->mdio.prtad = bp->port.phy_addr; 11544 11545 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 11546 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 11547 bp->mdio.prtad = 11548 XGXS_EXT_PHY_ADDR(ext_phy_config); 11549 11550 /* Configure link feature according to nvram value */ 11551 eee_mode = (((SHMEM_RD(bp, dev_info. 11552 port_feature_config[port].eee_power_mode)) & 11553 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 11554 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 11555 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 11556 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | 11557 EEE_MODE_ENABLE_LPI | 11558 EEE_MODE_OUTPUT_TIME; 11559 } else { 11560 bp->link_params.eee_mode = 0; 11561 } 11562 } 11563 11564 void bnx2x_get_iscsi_info(struct bnx2x *bp) 11565 { 11566 u32 no_flags = NO_ISCSI_FLAG; 11567 int port = BP_PORT(bp); 11568 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 11569 drv_lic_key[port].max_iscsi_conn); 11570 11571 if (!CNIC_SUPPORT(bp)) { 11572 bp->flags |= no_flags; 11573 return; 11574 } 11575 11576 /* Get the number of maximum allowed iSCSI connections */ 11577 bp->cnic_eth_dev.max_iscsi_conn = 11578 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 11579 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; 11580 11581 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", 11582 bp->cnic_eth_dev.max_iscsi_conn); 11583 11584 /* 11585 * If maximum allowed number of connections is zero - 11586 * disable the feature. 11587 */ 11588 if (!bp->cnic_eth_dev.max_iscsi_conn) 11589 bp->flags |= no_flags; 11590 } 11591 11592 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 11593 { 11594 /* Port info */ 11595 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 11596 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); 11597 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 11598 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); 11599 11600 /* Node info */ 11601 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 11602 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); 11603 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 11604 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 11605 } 11606 11607 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) 11608 { 11609 u8 count = 0; 11610 11611 if (IS_MF(bp)) { 11612 u8 fid; 11613 11614 /* iterate over absolute function ids for this path: */ 11615 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { 11616 if (IS_MF_SD(bp)) { 11617 u32 cfg = MF_CFG_RD(bp, 11618 func_mf_config[fid].config); 11619 11620 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && 11621 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == 11622 FUNC_MF_CFG_PROTOCOL_FCOE)) 11623 count++; 11624 } else { 11625 u32 cfg = MF_CFG_RD(bp, 11626 func_ext_config[fid]. 11627 func_cfg); 11628 11629 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && 11630 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) 11631 count++; 11632 } 11633 } 11634 } else { /* SF */ 11635 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 
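			/* an engine drives two ports in 4-port mode, one otherwise */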
2 : 1; 11636 11637 for (port = 0; port < port_cnt; port++) { 11638 u32 lic = SHMEM_RD(bp, 11639 drv_lic_key[port].max_fcoe_conn) ^ 11640 FW_ENCODE_32BIT_PATTERN; 11641 if (lic) 11642 count++; 11643 } 11644 } 11645 11646 return count; 11647 } 11648 11649 static void bnx2x_get_fcoe_info(struct bnx2x *bp) 11650 { 11651 int port = BP_PORT(bp); 11652 int func = BP_ABS_FUNC(bp); 11653 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 11654 drv_lic_key[port].max_fcoe_conn); 11655 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); 11656 11657 if (!CNIC_SUPPORT(bp)) { 11658 bp->flags |= NO_FCOE_FLAG; 11659 return; 11660 } 11661 11662 /* Get the number of maximum allowed FCoE connections */ 11663 bp->cnic_eth_dev.max_fcoe_conn = 11664 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 11665 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 11666 11667 /* Calculate the number of maximum allowed FCoE tasks */ 11668 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; 11669 11670 /* check if FCoE resources must be shared between different functions */ 11671 if (num_fcoe_func) 11672 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; 11673 11674 /* Read the WWN: */ 11675 if (!IS_MF(bp)) { 11676 /* Port info */ 11677 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 11678 SHMEM_RD(bp, 11679 dev_info.port_hw_config[port]. 11680 fcoe_wwn_port_name_upper); 11681 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 11682 SHMEM_RD(bp, 11683 dev_info.port_hw_config[port]. 11684 fcoe_wwn_port_name_lower); 11685 11686 /* Node info */ 11687 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 11688 SHMEM_RD(bp, 11689 dev_info.port_hw_config[port]. 11690 fcoe_wwn_node_name_upper); 11691 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 11692 SHMEM_RD(bp, 11693 dev_info.port_hw_config[port]. 11694 fcoe_wwn_node_name_lower); 11695 } else if (!IS_MF_SD(bp)) { 11696 /* Read the WWN info only if the FCoE feature is enabled for 11697 * this function. 11698 */ 11699 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)) 11700 bnx2x_get_ext_wwn_info(bp, func); 11701 } else { 11702 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) 11703 bnx2x_get_ext_wwn_info(bp, func); 11704 } 11705 11706 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); 11707 11708 /* 11709 * If maximum allowed number of connections is zero - 11710 * disable the feature. 11711 */ 11712 if (!bp->cnic_eth_dev.max_fcoe_conn) { 11713 bp->flags |= NO_FCOE_FLAG; 11714 eth_zero_addr(bp->fip_mac); 11715 } 11716 } 11717 11718 static void bnx2x_get_cnic_info(struct bnx2x *bp) 11719 { 11720 /* 11721 * iSCSI may be dynamically disabled but reading 11722 * info here we will decrease memory usage by driver 11723 * if the feature is disabled for good 11724 */ 11725 bnx2x_get_iscsi_info(bp); 11726 bnx2x_get_fcoe_info(bp); 11727 } 11728 11729 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) 11730 { 11731 u32 val, val2; 11732 int func = BP_ABS_FUNC(bp); 11733 int port = BP_PORT(bp); 11734 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; 11735 u8 *fip_mac = bp->fip_mac; 11736 11737 if (IS_MF(bp)) { 11738 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or 11739 * FCoE MAC then the appropriate feature should be disabled. 11740 * In non SD mode features configuration comes from struct 11741 * func_ext_config. 11742 */ 11743 if (!IS_MF_SD(bp)) { 11744 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 11745 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 11746 val2 = MF_CFG_RD(bp, func_ext_config[func]. 11747 iscsi_mac_addr_upper); 11748 val = MF_CFG_RD(bp, func_ext_config[func]. 
11749 iscsi_mac_addr_lower); 11750 bnx2x_set_mac_buf(iscsi_mac, val, val2); 11751 BNX2X_DEV_INFO 11752 ("Read iSCSI MAC: %pM\n", iscsi_mac); 11753 } else { 11754 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11755 } 11756 11757 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 11758 val2 = MF_CFG_RD(bp, func_ext_config[func]. 11759 fcoe_mac_addr_upper); 11760 val = MF_CFG_RD(bp, func_ext_config[func]. 11761 fcoe_mac_addr_lower); 11762 bnx2x_set_mac_buf(fip_mac, val, val2); 11763 BNX2X_DEV_INFO 11764 ("Read FCoE L2 MAC: %pM\n", fip_mac); 11765 } else { 11766 bp->flags |= NO_FCOE_FLAG; 11767 } 11768 11769 bp->mf_ext_config = cfg; 11770 11771 } else { /* SD MODE */ 11772 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 11773 /* use primary mac as iscsi mac */ 11774 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); 11775 11776 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 11777 BNX2X_DEV_INFO 11778 ("Read iSCSI MAC: %pM\n", iscsi_mac); 11779 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { 11780 /* use primary mac as fip mac */ 11781 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); 11782 BNX2X_DEV_INFO("SD FCoE MODE\n"); 11783 BNX2X_DEV_INFO 11784 ("Read FIP MAC: %pM\n", fip_mac); 11785 } 11786 } 11787 11788 /* If this is a storage-only interface, use SAN mac as 11789 * primary MAC. Notice that for SD this is already the case, 11790 * as the SAN mac was copied from the primary MAC. 11791 */ 11792 if (IS_MF_FCOE_AFEX(bp)) 11793 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 11794 } else { 11795 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11796 iscsi_mac_upper); 11797 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11798 iscsi_mac_lower); 11799 bnx2x_set_mac_buf(iscsi_mac, val, val2); 11800 11801 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11802 fcoe_fip_mac_upper); 11803 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11804 fcoe_fip_mac_lower); 11805 bnx2x_set_mac_buf(fip_mac, val, val2); 11806 } 11807 11808 /* Disable iSCSI OOO if MAC configuration is invalid. */ 11809 if (!is_valid_ether_addr(iscsi_mac)) { 11810 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11811 eth_zero_addr(iscsi_mac); 11812 } 11813 11814 /* Disable FCoE if MAC configuration is invalid. 
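	 * (an all-zero or otherwise malformed address read above must not
	 * be used as the FIP MAC).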
*/ 11815 if (!is_valid_ether_addr(fip_mac)) { 11816 bp->flags |= NO_FCOE_FLAG; 11817 eth_zero_addr(bp->fip_mac); 11818 } 11819 } 11820 11821 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) 11822 { 11823 u32 val, val2; 11824 int func = BP_ABS_FUNC(bp); 11825 int port = BP_PORT(bp); 11826 11827 /* Zero primary MAC configuration */ 11828 eth_zero_addr(bp->dev->dev_addr); 11829 11830 if (BP_NOMCP(bp)) { 11831 BNX2X_ERROR("warning: random MAC workaround active\n"); 11832 eth_hw_addr_random(bp->dev); 11833 } else if (IS_MF(bp)) { 11834 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 11835 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); 11836 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 11837 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) 11838 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11839 11840 if (CNIC_SUPPORT(bp)) 11841 bnx2x_get_cnic_mac_hwinfo(bp); 11842 } else { 11843 /* in SF read MACs from port configuration */ 11844 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11845 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11846 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11847 11848 if (CNIC_SUPPORT(bp)) 11849 bnx2x_get_cnic_mac_hwinfo(bp); 11850 } 11851 11852 if (!BP_NOMCP(bp)) { 11853 /* Read physical port identifier from shmem */ 11854 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11855 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11856 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); 11857 bp->flags |= HAS_PHYS_PORT_ID; 11858 } 11859 11860 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 11861 11862 if (!is_valid_ether_addr(bp->dev->dev_addr)) 11863 dev_err(&bp->pdev->dev, 11864 "bad Ethernet MAC address configuration: %pM\n" 11865 "change it manually before bringing up the appropriate network interface\n", 11866 bp->dev->dev_addr); 11867 } 11868 11869 static bool bnx2x_get_dropless_info(struct bnx2x *bp) 11870 { 11871 int tmp; 11872 u32 cfg; 11873 11874 if (IS_VF(bp)) 11875 return false; 11876 11877 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { 11878 /* Take function: tmp = func */ 11879 tmp = BP_ABS_FUNC(bp); 11880 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); 11881 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING); 11882 } else { 11883 /* Take port: tmp = port */ 11884 tmp = BP_PORT(bp); 11885 cfg = SHMEM_RD(bp, 11886 dev_info.port_hw_config[tmp].generic_features); 11887 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED); 11888 } 11889 return cfg; 11890 } 11891 11892 static void validate_set_si_mode(struct bnx2x *bp) 11893 { 11894 u8 func = BP_ABS_FUNC(bp); 11895 u32 val; 11896 11897 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 11898 11899 /* check for legal mac (upper bytes) */ 11900 if (val != 0xffff) { 11901 bp->mf_mode = MULTI_FUNCTION_SI; 11902 bp->mf_config[BP_VN(bp)] = 11903 MF_CFG_RD(bp, func_mf_config[func].config); 11904 } else 11905 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 11906 } 11907 11908 static int bnx2x_get_hwinfo(struct bnx2x *bp) 11909 { 11910 int /*abs*/func = BP_ABS_FUNC(bp); 11911 int vn; 11912 u32 val = 0, val2 = 0; 11913 int rc = 0; 11914 11915 /* Validate that chip access is feasible */ 11916 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) { 11917 dev_err(&bp->pdev->dev, 11918 "Chip read returns all Fs. 
Preventing probe from continuing\n"); 11919 return -EINVAL; 11920 } 11921 11922 bnx2x_get_common_hwinfo(bp); 11923 11924 /* 11925 * initialize IGU parameters 11926 */ 11927 if (CHIP_IS_E1x(bp)) { 11928 bp->common.int_block = INT_BLOCK_HC; 11929 11930 bp->igu_dsb_id = DEF_SB_IGU_ID; 11931 bp->igu_base_sb = 0; 11932 } else { 11933 bp->common.int_block = INT_BLOCK_IGU; 11934 11935 /* do not allow device reset during IGU info processing */ 11936 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11937 11938 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 11939 11940 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 11941 int tout = 5000; 11942 11943 BNX2X_DEV_INFO("FORCING Normal Mode\n"); 11944 11945 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 11946 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); 11947 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); 11948 11949 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 11950 tout--; 11951 usleep_range(1000, 2000); 11952 } 11953 11954 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 11955 dev_err(&bp->pdev->dev, 11956 "FORCING Normal Mode failed!!!\n"); 11957 bnx2x_release_hw_lock(bp, 11958 HW_LOCK_RESOURCE_RESET); 11959 return -EPERM; 11960 } 11961 } 11962 11963 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 11964 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); 11965 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; 11966 } else 11967 BNX2X_DEV_INFO("IGU Normal Mode\n"); 11968 11969 rc = bnx2x_get_igu_cam_info(bp); 11970 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11971 if (rc) 11972 return rc; 11973 } 11974 11975 /* 11976 * set base FW non-default (fast path) status block id, this value is 11977 * used to initialize the fw_sb_id saved on the fp/queue structure to 11978 * determine the id used by the FW. 11979 */ 11980 if (CHIP_IS_E1x(bp)) 11981 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); 11982 else /* 11983 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of 11984 * the same queue are indicated on the same IGU SB). So we prefer 11985 * FW and IGU SBs to be the same value. 11986 */ 11987 bp->base_fw_ndsb = bp->igu_base_sb; 11988 11989 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" 11990 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, 11991 bp->igu_sb_cnt, bp->base_fw_ndsb); 11992 11993 /* 11994 * Initialize MF configuration 11995 */ 11996 bp->mf_ov = 0; 11997 bp->mf_mode = 0; 11998 bp->mf_sub_mode = 0; 11999 vn = BP_VN(bp); 12000 12001 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 12002 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 12003 bp->common.shmem2_base, SHMEM2_RD(bp, size), 12004 (u32)offsetof(struct shmem2_region, mf_cfg_addr)); 12005 12006 if (SHMEM2_HAS(bp, mf_cfg_addr)) 12007 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 12008 else 12009 bp->common.mf_cfg_base = bp->common.shmem_base + 12010 offsetof(struct shmem_region, func_mb) + 12011 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 12012 /* 12013 * get mf configuration: 12014 * 1. Existence of MF configuration 12015 * 2. MAC address must be legal (check only upper bytes) 12016 * for Switch-Independent mode; 12017 * OVLAN must be legal for Switch-Dependent mode 12018 * 3. 
SF_MODE configures specific MF mode 12019 */ 12020 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 12021 /* get mf configuration */ 12022 val = SHMEM_RD(bp, 12023 dev_info.shared_feature_config.config); 12024 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; 12025 12026 switch (val) { 12027 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 12028 validate_set_si_mode(bp); 12029 break; 12030 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 12031 if ((!CHIP_IS_E1x(bp)) && 12032 (MF_CFG_RD(bp, func_mf_config[func]. 12033 mac_upper) != 0xffff) && 12034 (SHMEM2_HAS(bp, 12035 afex_driver_support))) { 12036 bp->mf_mode = MULTI_FUNCTION_AFEX; 12037 bp->mf_config[vn] = MF_CFG_RD(bp, 12038 func_mf_config[func].config); 12039 } else { 12040 BNX2X_DEV_INFO("can not configure afex mode\n"); 12041 } 12042 break; 12043 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 12044 /* get OV configuration */ 12045 val = MF_CFG_RD(bp, 12046 func_mf_config[FUNC_0].e1hov_tag); 12047 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 12048 12049 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 12050 bp->mf_mode = MULTI_FUNCTION_SD; 12051 bp->mf_config[vn] = MF_CFG_RD(bp, 12052 func_mf_config[func].config); 12053 } else 12054 BNX2X_DEV_INFO("illegal OV for SD\n"); 12055 break; 12056 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE: 12057 bp->mf_mode = MULTI_FUNCTION_SD; 12058 bp->mf_sub_mode = SUB_MF_MODE_BD; 12059 bp->mf_config[vn] = 12060 MF_CFG_RD(bp, 12061 func_mf_config[func].config); 12062 12063 if (SHMEM2_HAS(bp, mtu_size)) { 12064 int mtu_idx = BP_FW_MB_IDX(bp); 12065 u16 mtu_size; 12066 u32 mtu; 12067 12068 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]); 12069 mtu_size = (u16)mtu; 12070 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", 12071 mtu_size, mtu); 12072 12073 /* if valid: update device mtu */ 12074 if ((mtu_size >= ETH_MIN_PACKET_SIZE) && 12075 (mtu_size <= 12076 ETH_MAX_JUMBO_PACKET_SIZE)) 12077 bp->dev->mtu = mtu_size; 12078 } 12079 break; 12080 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: 12081 bp->mf_mode = MULTI_FUNCTION_SD; 12082 bp->mf_sub_mode = SUB_MF_MODE_UFP; 12083 bp->mf_config[vn] = 12084 MF_CFG_RD(bp, 12085 func_mf_config[func].config); 12086 break; 12087 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 12088 bp->mf_config[vn] = 0; 12089 break; 12090 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE: 12091 val2 = SHMEM_RD(bp, 12092 dev_info.shared_hw_config.config_3); 12093 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK; 12094 switch (val2) { 12095 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5: 12096 validate_set_si_mode(bp); 12097 bp->mf_sub_mode = 12098 SUB_MF_MODE_NPAR1_DOT_5; 12099 break; 12100 default: 12101 /* Unknown configuration */ 12102 bp->mf_config[vn] = 0; 12103 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n", 12104 val); 12105 } 12106 break; 12107 default: 12108 /* Unknown configuration: reset mf_config */ 12109 bp->mf_config[vn] = 0; 12110 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); 12111 } 12112 } 12113 12114 BNX2X_DEV_INFO("%s function mode\n", 12115 IS_MF(bp) ? 
"multi" : "single"); 12116 12117 switch (bp->mf_mode) { 12118 case MULTI_FUNCTION_SD: 12119 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 12120 FUNC_MF_CFG_E1HOV_TAG_MASK; 12121 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 12122 bp->mf_ov = val; 12123 bp->path_has_ovlan = true; 12124 12125 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", 12126 func, bp->mf_ov, bp->mf_ov); 12127 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || 12128 (bp->mf_sub_mode == SUB_MF_MODE_BD)) { 12129 dev_err(&bp->pdev->dev, 12130 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", 12131 func); 12132 bp->path_has_ovlan = true; 12133 } else { 12134 dev_err(&bp->pdev->dev, 12135 "No valid MF OV for func %d, aborting\n", 12136 func); 12137 return -EPERM; 12138 } 12139 break; 12140 case MULTI_FUNCTION_AFEX: 12141 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); 12142 break; 12143 case MULTI_FUNCTION_SI: 12144 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 12145 func); 12146 break; 12147 default: 12148 if (vn) { 12149 dev_err(&bp->pdev->dev, 12150 "VN %d is in a single function mode, aborting\n", 12151 vn); 12152 return -EPERM; 12153 } 12154 break; 12155 } 12156 12157 /* check if other port on the path needs ovlan: 12158 * Since MF configuration is shared between ports 12159 * Possible mixed modes are only 12160 * {SF, SI} {SF, SD} {SD, SF} {SI, SF} 12161 */ 12162 if (CHIP_MODE_IS_4_PORT(bp) && 12163 !bp->path_has_ovlan && 12164 !IS_MF(bp) && 12165 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 12166 u8 other_port = !BP_PORT(bp); 12167 u8 other_func = BP_PATH(bp) + 2*other_port; 12168 val = MF_CFG_RD(bp, 12169 func_mf_config[other_func].e1hov_tag); 12170 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 12171 bp->path_has_ovlan = true; 12172 } 12173 } 12174 12175 /* adjust igu_sb_cnt to MF for E1H */ 12176 if (CHIP_IS_E1H(bp) && IS_MF(bp)) 12177 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); 12178 12179 /* port info */ 12180 bnx2x_get_port_hwinfo(bp); 12181 12182 /* Get MAC addresses */ 12183 bnx2x_get_mac_hwinfo(bp); 12184 12185 bnx2x_get_cnic_info(bp); 12186 12187 return rc; 12188 } 12189 12190 static void bnx2x_read_fwinfo(struct bnx2x *bp) 12191 { 12192 int cnt, i, block_end, rodi; 12193 char vpd_start[BNX2X_VPD_LEN+1]; 12194 char str_id_reg[VENDOR_ID_LEN+1]; 12195 char str_id_cap[VENDOR_ID_LEN+1]; 12196 char *vpd_data; 12197 char *vpd_extended_data = NULL; 12198 u8 len; 12199 12200 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); 12201 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); 12202 12203 if (cnt < BNX2X_VPD_LEN) 12204 goto out_not_found; 12205 12206 /* VPD RO tag should be first tag after identifier string, hence 12207 * we should be able to find it in first BNX2X_VPD_LEN chars 12208 */ 12209 i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA); 12210 if (i < 0) 12211 goto out_not_found; 12212 12213 block_end = i + PCI_VPD_LRDT_TAG_SIZE + 12214 pci_vpd_lrdt_size(&vpd_start[i]); 12215 12216 i += PCI_VPD_LRDT_TAG_SIZE; 12217 12218 if (block_end > BNX2X_VPD_LEN) { 12219 vpd_extended_data = kmalloc(block_end, GFP_KERNEL); 12220 if (vpd_extended_data == NULL) 12221 goto out_not_found; 12222 12223 /* read rest of vpd image into vpd_extended_data */ 12224 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); 12225 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, 12226 block_end - BNX2X_VPD_LEN, 12227 vpd_extended_data + BNX2X_VPD_LEN); 12228 if (cnt < (block_end - BNX2X_VPD_LEN)) 12229 goto out_not_found; 12230 vpd_data = vpd_extended_data; 12231 
} else 12232 vpd_data = vpd_start; 12233 12234 /* now vpd_data holds full vpd content in both cases */ 12235 12236 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 12237 PCI_VPD_RO_KEYWORD_MFR_ID); 12238 if (rodi < 0) 12239 goto out_not_found; 12240 12241 len = pci_vpd_info_field_size(&vpd_data[rodi]); 12242 12243 if (len != VENDOR_ID_LEN) 12244 goto out_not_found; 12245 12246 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 12247 12248 /* vendor specific info */ 12249 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); 12250 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); 12251 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || 12252 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { 12253 12254 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 12255 PCI_VPD_RO_KEYWORD_VENDOR0); 12256 if (rodi >= 0) { 12257 len = pci_vpd_info_field_size(&vpd_data[rodi]); 12258 12259 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 12260 12261 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { 12262 memcpy(bp->fw_ver, &vpd_data[rodi], len); 12263 bp->fw_ver[len] = ' '; 12264 } 12265 } 12266 kfree(vpd_extended_data); 12267 return; 12268 } 12269 out_not_found: 12270 kfree(vpd_extended_data); 12271 return; 12272 } 12273 12274 static void bnx2x_set_modes_bitmap(struct bnx2x *bp) 12275 { 12276 u32 flags = 0; 12277 12278 if (CHIP_REV_IS_FPGA(bp)) 12279 SET_FLAGS(flags, MODE_FPGA); 12280 else if (CHIP_REV_IS_EMUL(bp)) 12281 SET_FLAGS(flags, MODE_EMUL); 12282 else 12283 SET_FLAGS(flags, MODE_ASIC); 12284 12285 if (CHIP_MODE_IS_4_PORT(bp)) 12286 SET_FLAGS(flags, MODE_PORT4); 12287 else 12288 SET_FLAGS(flags, MODE_PORT2); 12289 12290 if (CHIP_IS_E2(bp)) 12291 SET_FLAGS(flags, MODE_E2); 12292 else if (CHIP_IS_E3(bp)) { 12293 SET_FLAGS(flags, MODE_E3); 12294 if (CHIP_REV(bp) == CHIP_REV_Ax) 12295 SET_FLAGS(flags, MODE_E3_A0); 12296 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 12297 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 12298 } 12299 12300 if (IS_MF(bp)) { 12301 SET_FLAGS(flags, MODE_MF); 12302 switch (bp->mf_mode) { 12303 case MULTI_FUNCTION_SD: 12304 SET_FLAGS(flags, MODE_MF_SD); 12305 break; 12306 case MULTI_FUNCTION_SI: 12307 SET_FLAGS(flags, MODE_MF_SI); 12308 break; 12309 case MULTI_FUNCTION_AFEX: 12310 SET_FLAGS(flags, MODE_MF_AFEX); 12311 break; 12312 } 12313 } else 12314 SET_FLAGS(flags, MODE_SF); 12315 12316 #if defined(__LITTLE_ENDIAN) 12317 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 12318 #else /*(__BIG_ENDIAN)*/ 12319 SET_FLAGS(flags, MODE_BIG_ENDIAN); 12320 #endif 12321 INIT_MODE_FLAGS(bp) = flags; 12322 } 12323 12324 static int bnx2x_init_bp(struct bnx2x *bp) 12325 { 12326 int func; 12327 int rc; 12328 12329 mutex_init(&bp->port.phy_mutex); 12330 mutex_init(&bp->fw_mb_mutex); 12331 mutex_init(&bp->drv_info_mutex); 12332 sema_init(&bp->stats_lock, 1); 12333 bp->drv_info_mng_owner = false; 12334 INIT_LIST_HEAD(&bp->vlan_reg); 12335 12336 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 12337 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 12338 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 12339 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); 12340 if (IS_PF(bp)) { 12341 rc = bnx2x_get_hwinfo(bp); 12342 if (rc) 12343 return rc; 12344 } else { 12345 eth_zero_addr(bp->dev->dev_addr); 12346 } 12347 12348 bnx2x_set_modes_bitmap(bp); 12349 12350 rc = bnx2x_alloc_mem_bp(bp); 12351 if (rc) 12352 return rc; 12353 12354 bnx2x_read_fwinfo(bp); 12355 12356 func = BP_FUNC(bp); 12357 12358 /* need to reset chip if undi was active */ 12359 if (IS_PF(bp) && 
!BP_NOMCP(bp)) { 12360 /* init fw_seq */ 12361 bp->fw_seq = 12362 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 12363 DRV_MSG_SEQ_NUMBER_MASK; 12364 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 12365 12366 rc = bnx2x_prev_unload(bp); 12367 if (rc) { 12368 bnx2x_free_mem_bp(bp); 12369 return rc; 12370 } 12371 } 12372 12373 if (CHIP_REV_IS_FPGA(bp)) 12374 dev_err(&bp->pdev->dev, "FPGA detected\n"); 12375 12376 if (BP_NOMCP(bp) && (func == 0)) 12377 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 12378 12379 bp->disable_tpa = disable_tpa; 12380 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); 12381 /* Reduce memory usage in kdump environment by disabling TPA */ 12382 bp->disable_tpa |= is_kdump_kernel(); 12383 12384 /* Set TPA flags */ 12385 if (bp->disable_tpa) { 12386 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12387 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12388 } 12389 12390 if (CHIP_IS_E1(bp)) 12391 bp->dropless_fc = false; 12392 else 12393 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); 12394 12395 bp->mrrs = mrrs; 12396 12397 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; 12398 if (IS_VF(bp)) 12399 bp->rx_ring_size = MAX_RX_AVAIL; 12400 12401 /* make sure that the numbers are in the right granularity */ 12402 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 12403 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 12404 12405 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; 12406 12407 timer_setup(&bp->timer, bnx2x_timer, 0); 12408 bp->timer.expires = jiffies + bp->current_interval; 12409 12410 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && 12411 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && 12412 SHMEM2_HAS(bp, dcbx_en) && 12413 SHMEM2_RD(bp, dcbx_lldp_params_offset) && 12414 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) && 12415 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) { 12416 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); 12417 bnx2x_dcbx_init_params(bp); 12418 } else { 12419 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF); 12420 } 12421 12422 if (CHIP_IS_E1x(bp)) 12423 bp->cnic_base_cl_id = FP_SB_MAX_E1x; 12424 else 12425 bp->cnic_base_cl_id = FP_SB_MAX_E2; 12426 12427 /* multiple tx priority */ 12428 if (IS_VF(bp)) 12429 bp->max_cos = 1; 12430 else if (CHIP_IS_E1x(bp)) 12431 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; 12432 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) 12433 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; 12434 else if (CHIP_IS_E3B0(bp)) 12435 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 12436 else 12437 BNX2X_ERR("unknown chip %x revision %x\n", 12438 CHIP_NUM(bp), CHIP_REV(bp)); 12439 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); 12440 12441 /* We need at least one default status block for slow-path events, 12442 * second status block for the L2 queue, and a third status block for 12443 * CNIC if supported. 
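	 * Hence the minimum below: three vectors with CNIC, two for a PF
	 * without CNIC, and a single vector for a VF.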
12444 */ 12445 if (IS_VF(bp)) 12446 bp->min_msix_vec_cnt = 1; 12447 else if (CNIC_SUPPORT(bp)) 12448 bp->min_msix_vec_cnt = 3; 12449 else /* PF w/o cnic */ 12450 bp->min_msix_vec_cnt = 2; 12451 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); 12452 12453 bp->dump_preset_idx = 1; 12454 12455 return rc; 12456 } 12457 12458 /**************************************************************************** 12459 * General service functions 12460 ****************************************************************************/ 12461 12462 /* 12463 * net_device service functions 12464 */ 12465 12466 /* called with rtnl_lock */ 12467 static int bnx2x_open(struct net_device *dev) 12468 { 12469 struct bnx2x *bp = netdev_priv(dev); 12470 int rc; 12471 12472 bp->stats_init = true; 12473 12474 netif_carrier_off(dev); 12475 12476 bnx2x_set_power_state(bp, PCI_D0); 12477 12478 /* If parity had happen during the unload, then attentions 12479 * and/or RECOVERY_IN_PROGRES may still be set. In this case we 12480 * want the first function loaded on the current engine to 12481 * complete the recovery. 12482 * Parity recovery is only relevant for PF driver. 12483 */ 12484 if (IS_PF(bp)) { 12485 int other_engine = BP_PATH(bp) ? 0 : 1; 12486 bool other_load_status, load_status; 12487 bool global = false; 12488 12489 other_load_status = bnx2x_get_load_status(bp, other_engine); 12490 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 12491 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 12492 bnx2x_chk_parity_attn(bp, &global, true)) { 12493 do { 12494 /* If there are attentions and they are in a 12495 * global blocks, set the GLOBAL_RESET bit 12496 * regardless whether it will be this function 12497 * that will complete the recovery or not. 12498 */ 12499 if (global) 12500 bnx2x_set_reset_global(bp); 12501 12502 /* Only the first function on the current 12503 * engine should try to recover in open. In case 12504 * of attentions in global blocks only the first 12505 * in the chip should try to recover. 12506 */ 12507 if ((!load_status && 12508 (!global || !other_load_status)) && 12509 bnx2x_trylock_leader_lock(bp) && 12510 !bnx2x_leader_reset(bp)) { 12511 netdev_info(bp->dev, 12512 "Recovered in open\n"); 12513 break; 12514 } 12515 12516 /* recovery has failed... */ 12517 bnx2x_set_power_state(bp, PCI_D3hot); 12518 bp->recovery_state = BNX2X_RECOVERY_FAILED; 12519 12520 BNX2X_ERR("Recovery flow hasn't been properly completed yet. 
Try again later.\n" 12521 "If you still see this message after a few retries then power cycle is required.\n"); 12522 12523 return -EAGAIN; 12524 } while (0); 12525 } 12526 } 12527 12528 bp->recovery_state = BNX2X_RECOVERY_DONE; 12529 rc = bnx2x_nic_load(bp, LOAD_OPEN); 12530 if (rc) 12531 return rc; 12532 12533 return 0; 12534 } 12535 12536 /* called with rtnl_lock */ 12537 static int bnx2x_close(struct net_device *dev) 12538 { 12539 struct bnx2x *bp = netdev_priv(dev); 12540 12541 /* Unload the driver, release IRQs */ 12542 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 12543 12544 return 0; 12545 } 12546 12547 struct bnx2x_mcast_list_elem_group 12548 { 12549 struct list_head mcast_group_link; 12550 struct bnx2x_mcast_list_elem mcast_elems[]; 12551 }; 12552 12553 #define MCAST_ELEMS_PER_PG \ 12554 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \ 12555 sizeof(struct bnx2x_mcast_list_elem)) 12556 12557 static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list) 12558 { 12559 struct bnx2x_mcast_list_elem_group *current_mcast_group; 12560 12561 while (!list_empty(mcast_group_list)) { 12562 current_mcast_group = list_first_entry(mcast_group_list, 12563 struct bnx2x_mcast_list_elem_group, 12564 mcast_group_link); 12565 list_del(¤t_mcast_group->mcast_group_link); 12566 free_page((unsigned long)current_mcast_group); 12567 } 12568 } 12569 12570 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 12571 struct bnx2x_mcast_ramrod_params *p, 12572 struct list_head *mcast_group_list) 12573 { 12574 struct bnx2x_mcast_list_elem *mc_mac; 12575 struct netdev_hw_addr *ha; 12576 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL; 12577 int mc_count = netdev_mc_count(bp->dev); 12578 int offset = 0; 12579 12580 INIT_LIST_HEAD(&p->mcast_list); 12581 netdev_for_each_mc_addr(ha, bp->dev) { 12582 if (!offset) { 12583 current_mcast_group = 12584 (struct bnx2x_mcast_list_elem_group *) 12585 __get_free_page(GFP_ATOMIC); 12586 if (!current_mcast_group) { 12587 bnx2x_free_mcast_macs_list(mcast_group_list); 12588 BNX2X_ERR("Failed to allocate mc MAC list\n"); 12589 return -ENOMEM; 12590 } 12591 list_add(¤t_mcast_group->mcast_group_link, 12592 mcast_group_list); 12593 } 12594 mc_mac = ¤t_mcast_group->mcast_elems[offset]; 12595 mc_mac->mac = bnx2x_mc_addr(ha); 12596 list_add_tail(&mc_mac->link, &p->mcast_list); 12597 offset++; 12598 if (offset == MCAST_ELEMS_PER_PG) 12599 offset = 0; 12600 } 12601 p->mcast_list_len = mc_count; 12602 return 0; 12603 } 12604 12605 /** 12606 * bnx2x_set_uc_list - configure a new unicast MACs list. 12607 * 12608 * @bp: driver handle 12609 * 12610 * We will use zero (0) as a MAC type for these MACs. 
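 * Previously configured unicast MACs are scheduled for deletion first;
 * re-adding a MAC that already exists is not treated as an error.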
12611 */ 12612 static int bnx2x_set_uc_list(struct bnx2x *bp) 12613 { 12614 int rc; 12615 struct net_device *dev = bp->dev; 12616 struct netdev_hw_addr *ha; 12617 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; 12618 unsigned long ramrod_flags = 0; 12619 12620 /* First schedule a cleanup up of old configuration */ 12621 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); 12622 if (rc < 0) { 12623 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); 12624 return rc; 12625 } 12626 12627 netdev_for_each_uc_addr(ha, dev) { 12628 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, 12629 BNX2X_UC_LIST_MAC, &ramrod_flags); 12630 if (rc == -EEXIST) { 12631 DP(BNX2X_MSG_SP, 12632 "Failed to schedule ADD operations: %d\n", rc); 12633 /* do not treat adding same MAC as error */ 12634 rc = 0; 12635 12636 } else if (rc < 0) { 12637 12638 BNX2X_ERR("Failed to schedule ADD operations: %d\n", 12639 rc); 12640 return rc; 12641 } 12642 } 12643 12644 /* Execute the pending commands */ 12645 __set_bit(RAMROD_CONT, &ramrod_flags); 12646 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, 12647 BNX2X_UC_LIST_MAC, &ramrod_flags); 12648 } 12649 12650 static int bnx2x_set_mc_list_e1x(struct bnx2x *bp) 12651 { 12652 LIST_HEAD(mcast_group_list); 12653 struct net_device *dev = bp->dev; 12654 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 12655 int rc = 0; 12656 12657 rparam.mcast_obj = &bp->mcast_obj; 12658 12659 /* first, clear all configured multicast MACs */ 12660 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 12661 if (rc < 0) { 12662 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); 12663 return rc; 12664 } 12665 12666 /* then, configure a new MACs list */ 12667 if (netdev_mc_count(dev)) { 12668 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list); 12669 if (rc) 12670 return rc; 12671 12672 /* Now add the new MACs */ 12673 rc = bnx2x_config_mcast(bp, &rparam, 12674 BNX2X_MCAST_CMD_ADD); 12675 if (rc < 0) 12676 BNX2X_ERR("Failed to set a new multicast configuration: %d\n", 12677 rc); 12678 12679 bnx2x_free_mcast_macs_list(&mcast_group_list); 12680 } 12681 12682 return rc; 12683 } 12684 12685 static int bnx2x_set_mc_list(struct bnx2x *bp) 12686 { 12687 LIST_HEAD(mcast_group_list); 12688 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 12689 struct net_device *dev = bp->dev; 12690 int rc = 0; 12691 12692 /* On older adapters, we need to flush and re-add filters */ 12693 if (CHIP_IS_E1x(bp)) 12694 return bnx2x_set_mc_list_e1x(bp); 12695 12696 rparam.mcast_obj = &bp->mcast_obj; 12697 12698 if (netdev_mc_count(dev)) { 12699 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list); 12700 if (rc) 12701 return rc; 12702 12703 /* Override the curently configured set of mc filters */ 12704 rc = bnx2x_config_mcast(bp, &rparam, 12705 BNX2X_MCAST_CMD_SET); 12706 if (rc < 0) 12707 BNX2X_ERR("Failed to set a new multicast configuration: %d\n", 12708 rc); 12709 12710 bnx2x_free_mcast_macs_list(&mcast_group_list); 12711 } else { 12712 /* If no mc addresses are required, flush the configuration */ 12713 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 12714 if (rc < 0) 12715 BNX2X_ERR("Failed to clear multicast configuration %d\n", 12716 rc); 12717 } 12718 12719 return rc; 12720 } 12721 12722 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ 12723 static void bnx2x_set_rx_mode(struct net_device *dev) 12724 { 12725 struct bnx2x *bp = netdev_priv(dev); 12726 12727 if (bp->state != BNX2X_STATE_OPEN) 
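	/* device is not fully up yet: only log, nothing to program at this point */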
{ 12728 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 12729 return; 12730 } else { 12731 /* Schedule an SP task to handle rest of change */ 12732 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, 12733 NETIF_MSG_IFUP); 12734 } 12735 } 12736 12737 void bnx2x_set_rx_mode_inner(struct bnx2x *bp) 12738 { 12739 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 12740 12741 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); 12742 12743 netif_addr_lock_bh(bp->dev); 12744 12745 if (bp->dev->flags & IFF_PROMISC) { 12746 rx_mode = BNX2X_RX_MODE_PROMISC; 12747 } else if ((bp->dev->flags & IFF_ALLMULTI) || 12748 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && 12749 CHIP_IS_E1(bp))) { 12750 rx_mode = BNX2X_RX_MODE_ALLMULTI; 12751 } else { 12752 if (IS_PF(bp)) { 12753 /* some multicasts */ 12754 if (bnx2x_set_mc_list(bp) < 0) 12755 rx_mode = BNX2X_RX_MODE_ALLMULTI; 12756 12757 /* release bh lock, as bnx2x_set_uc_list might sleep */ 12758 netif_addr_unlock_bh(bp->dev); 12759 if (bnx2x_set_uc_list(bp) < 0) 12760 rx_mode = BNX2X_RX_MODE_PROMISC; 12761 netif_addr_lock_bh(bp->dev); 12762 } else { 12763 /* configuring mcast to a vf involves sleeping (when we 12764 * wait for the pf's response). 12765 */ 12766 bnx2x_schedule_sp_rtnl(bp, 12767 BNX2X_SP_RTNL_VFPF_MCAST, 0); 12768 } 12769 } 12770 12771 bp->rx_mode = rx_mode; 12772 /* handle ISCSI SD mode */ 12773 if (IS_MF_ISCSI_ONLY(bp)) 12774 bp->rx_mode = BNX2X_RX_MODE_NONE; 12775 12776 /* Schedule the rx_mode command */ 12777 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 12778 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 12779 netif_addr_unlock_bh(bp->dev); 12780 return; 12781 } 12782 12783 if (IS_PF(bp)) { 12784 bnx2x_set_storm_rx_mode(bp); 12785 netif_addr_unlock_bh(bp->dev); 12786 } else { 12787 /* VF will need to request the PF to make this change, and so 12788 * the VF needs to release the bottom-half lock prior to the 12789 * request (as it will likely require sleep on the VF side) 12790 */ 12791 netif_addr_unlock_bh(bp->dev); 12792 bnx2x_vfpf_storm_rx_mode(bp); 12793 } 12794 } 12795 12796 /* called with rtnl_lock */ 12797 static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 12798 int devad, u16 addr) 12799 { 12800 struct bnx2x *bp = netdev_priv(netdev); 12801 u16 value; 12802 int rc; 12803 12804 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 12805 prtad, devad, addr); 12806 12807 /* The HW expects different devad if CL22 is used */ 12808 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 12809 12810 bnx2x_acquire_phy_lock(bp); 12811 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); 12812 bnx2x_release_phy_lock(bp); 12813 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 12814 12815 if (!rc) 12816 rc = value; 12817 return rc; 12818 } 12819 12820 /* called with rtnl_lock */ 12821 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, 12822 u16 addr, u16 value) 12823 { 12824 struct bnx2x *bp = netdev_priv(netdev); 12825 int rc; 12826 12827 DP(NETIF_MSG_LINK, 12828 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", 12829 prtad, devad, addr, value); 12830 12831 /* The HW expects different devad if CL22 is used */ 12832 devad = (devad == MDIO_DEVAD_NONE) ? 
DEFAULT_PHY_DEV_ADDR : devad; 12833 12834 bnx2x_acquire_phy_lock(bp); 12835 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); 12836 bnx2x_release_phy_lock(bp); 12837 return rc; 12838 } 12839 12840 /* called with rtnl_lock */ 12841 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 12842 { 12843 struct bnx2x *bp = netdev_priv(dev); 12844 struct mii_ioctl_data *mdio = if_mii(ifr); 12845 12846 if (!netif_running(dev)) 12847 return -EAGAIN; 12848 12849 switch (cmd) { 12850 case SIOCSHWTSTAMP: 12851 return bnx2x_hwtstamp_ioctl(bp, ifr); 12852 default: 12853 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", 12854 mdio->phy_id, mdio->reg_num, mdio->val_in); 12855 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 12856 } 12857 } 12858 12859 static int bnx2x_validate_addr(struct net_device *dev) 12860 { 12861 struct bnx2x *bp = netdev_priv(dev); 12862 12863 /* query the bulletin board for mac address configured by the PF */ 12864 if (IS_VF(bp)) 12865 bnx2x_sample_bulletin(bp); 12866 12867 if (!is_valid_ether_addr(dev->dev_addr)) { 12868 BNX2X_ERR("Non-valid Ethernet address\n"); 12869 return -EADDRNOTAVAIL; 12870 } 12871 return 0; 12872 } 12873 12874 static int bnx2x_get_phys_port_id(struct net_device *netdev, 12875 struct netdev_phys_item_id *ppid) 12876 { 12877 struct bnx2x *bp = netdev_priv(netdev); 12878 12879 if (!(bp->flags & HAS_PHYS_PORT_ID)) 12880 return -EOPNOTSUPP; 12881 12882 ppid->id_len = sizeof(bp->phys_port_id); 12883 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); 12884 12885 return 0; 12886 } 12887 12888 static netdev_features_t bnx2x_features_check(struct sk_buff *skb, 12889 struct net_device *dev, 12890 netdev_features_t features) 12891 { 12892 /* 12893 * A skb with gso_size + header length > 9700 will cause a 12894 * firmware panic. Drop GSO support. 12895 * 12896 * Eventually the upper layer should not pass these packets down. 12897 * 12898 * For speed, if the gso_size is <= 9000, assume there will 12899 * not be 700 bytes of headers and pass it through. Only do a 12900 * full (slow) validation if the gso_size is > 9000. 12901 * 12902 * (Due to the way SKB_BY_FRAGS works this will also do a full 12903 * validation in that case.) 
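	 * In other words, gso_size values at or below 9000 are assumed to be
	 * accompanied by fewer than 700 bytes of headers, so only the larger
	 * sizes pay for the slow skb_gso_validate_mac_len() check.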
12904 */ 12905 if (unlikely(skb_is_gso(skb) && 12906 (skb_shinfo(skb)->gso_size > 9000) && 12907 !skb_gso_validate_mac_len(skb, 9700))) 12908 features &= ~NETIF_F_GSO_MASK; 12909 12910 features = vlan_features_check(skb, features); 12911 return vxlan_features_check(skb, features); 12912 } 12913 12914 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) 12915 { 12916 int rc; 12917 12918 if (IS_PF(bp)) { 12919 unsigned long ramrod_flags = 0; 12920 12921 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12922 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, 12923 add, &ramrod_flags); 12924 } else { 12925 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); 12926 } 12927 12928 return rc; 12929 } 12930 12931 static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp) 12932 { 12933 struct bnx2x_vlan_entry *vlan; 12934 int rc = 0; 12935 12936 /* Configure all non-configured entries */ 12937 list_for_each_entry(vlan, &bp->vlan_reg, link) { 12938 if (vlan->hw) 12939 continue; 12940 12941 if (bp->vlan_cnt >= bp->vlan_credit) 12942 return -ENOBUFS; 12943 12944 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); 12945 if (rc) { 12946 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); 12947 return rc; 12948 } 12949 12950 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); 12951 vlan->hw = true; 12952 bp->vlan_cnt++; 12953 } 12954 12955 return 0; 12956 } 12957 12958 static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) 12959 { 12960 bool need_accept_any_vlan; 12961 12962 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp); 12963 12964 if (bp->accept_any_vlan != need_accept_any_vlan) { 12965 bp->accept_any_vlan = need_accept_any_vlan; 12966 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", 12967 bp->accept_any_vlan ? "raised" : "cleared"); 12968 if (set_rx_mode) { 12969 if (IS_PF(bp)) 12970 bnx2x_set_rx_mode_inner(bp); 12971 else 12972 bnx2x_vfpf_storm_rx_mode(bp); 12973 } 12974 } 12975 } 12976 12977 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) 12978 { 12979 /* Don't set rx mode here. Our caller will do it. 
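	 * (hence the 'false' set_rx_mode argument passed below).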
*/ 12980 bnx2x_vlan_configure(bp, false); 12981 12982 return 0; 12983 } 12984 12985 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 12986 { 12987 struct bnx2x *bp = netdev_priv(dev); 12988 struct bnx2x_vlan_entry *vlan; 12989 12990 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); 12991 12992 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL); 12993 if (!vlan) 12994 return -ENOMEM; 12995 12996 vlan->vid = vid; 12997 vlan->hw = false; 12998 list_add_tail(&vlan->link, &bp->vlan_reg); 12999 13000 if (netif_running(dev)) 13001 bnx2x_vlan_configure(bp, true); 13002 13003 return 0; 13004 } 13005 13006 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 13007 { 13008 struct bnx2x *bp = netdev_priv(dev); 13009 struct bnx2x_vlan_entry *vlan; 13010 bool found = false; 13011 int rc = 0; 13012 13013 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); 13014 13015 list_for_each_entry(vlan, &bp->vlan_reg, link) 13016 if (vlan->vid == vid) { 13017 found = true; 13018 break; 13019 } 13020 13021 if (!found) { 13022 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); 13023 return -EINVAL; 13024 } 13025 13026 if (netif_running(dev) && vlan->hw) { 13027 rc = __bnx2x_vlan_configure_vid(bp, vid, false); 13028 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); 13029 bp->vlan_cnt--; 13030 } 13031 13032 list_del(&vlan->link); 13033 kfree(vlan); 13034 13035 if (netif_running(dev)) 13036 bnx2x_vlan_configure(bp, true); 13037 13038 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); 13039 13040 return rc; 13041 } 13042 13043 static const struct net_device_ops bnx2x_netdev_ops = { 13044 .ndo_open = bnx2x_open, 13045 .ndo_stop = bnx2x_close, 13046 .ndo_start_xmit = bnx2x_start_xmit, 13047 .ndo_select_queue = bnx2x_select_queue, 13048 .ndo_set_rx_mode = bnx2x_set_rx_mode, 13049 .ndo_set_mac_address = bnx2x_change_mac_addr, 13050 .ndo_validate_addr = bnx2x_validate_addr, 13051 .ndo_do_ioctl = bnx2x_ioctl, 13052 .ndo_change_mtu = bnx2x_change_mtu, 13053 .ndo_fix_features = bnx2x_fix_features, 13054 .ndo_set_features = bnx2x_set_features, 13055 .ndo_tx_timeout = bnx2x_tx_timeout, 13056 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, 13057 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, 13058 .ndo_setup_tc = __bnx2x_setup_tc, 13059 #ifdef CONFIG_BNX2X_SRIOV 13060 .ndo_set_vf_mac = bnx2x_set_vf_mac, 13061 .ndo_set_vf_vlan = bnx2x_set_vf_vlan, 13062 .ndo_get_vf_config = bnx2x_get_vf_config, 13063 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk, 13064 #endif 13065 #ifdef NETDEV_FCOE_WWNN 13066 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 13067 #endif 13068 13069 .ndo_get_phys_port_id = bnx2x_get_phys_port_id, 13070 .ndo_set_vf_link_state = bnx2x_set_vf_link_state, 13071 .ndo_features_check = bnx2x_features_check, 13072 }; 13073 13074 static int bnx2x_set_coherency_mask(struct bnx2x *bp) 13075 { 13076 struct device *dev = &bp->pdev->dev; 13077 13078 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && 13079 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { 13080 dev_err(dev, "System does not support DMA, aborting\n"); 13081 return -EIO; 13082 } 13083 13084 return 0; 13085 } 13086 13087 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) 13088 { 13089 if (bp->flags & AER_ENABLED) { 13090 pci_disable_pcie_error_reporting(bp->pdev); 13091 bp->flags &= ~AER_ENABLED; 13092 } 13093 } 13094 13095 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, 13096 struct net_device *dev, unsigned long board_type) 13097 { 13098 int rc; 13099 u32 pci_cfg_dword; 
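/* BCM57710/57711/57711E are the E1/E1H generation; the other boards handled by this driver are E2/E3. The chip_is_e1x flag set just below gates the older chips' smaller offload feature set and the source used for the PF number later in this function. */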
13100 bool chip_is_e1x = (board_type == BCM57710 || 13101 board_type == BCM57711 || 13102 board_type == BCM57711E); 13103 13104 SET_NETDEV_DEV(dev, &pdev->dev); 13105 13106 bp->dev = dev; 13107 bp->pdev = pdev; 13108 13109 rc = pci_enable_device(pdev); 13110 if (rc) { 13111 dev_err(&bp->pdev->dev, 13112 "Cannot enable PCI device, aborting\n"); 13113 goto err_out; 13114 } 13115 13116 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 13117 dev_err(&bp->pdev->dev, 13118 "Cannot find PCI device base address, aborting\n"); 13119 rc = -ENODEV; 13120 goto err_out_disable; 13121 } 13122 13123 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 13124 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); 13125 rc = -ENODEV; 13126 goto err_out_disable; 13127 } 13128 13129 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword); 13130 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) == 13131 PCICFG_REVESION_ID_ERROR_VAL) { 13132 pr_err("PCI device error, probably due to fan failure, aborting\n"); 13133 rc = -ENODEV; 13134 goto err_out_disable; 13135 } 13136 13137 if (atomic_read(&pdev->enable_cnt) == 1) { 13138 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 13139 if (rc) { 13140 dev_err(&bp->pdev->dev, 13141 "Cannot obtain PCI resources, aborting\n"); 13142 goto err_out_disable; 13143 } 13144 13145 pci_set_master(pdev); 13146 pci_save_state(pdev); 13147 } 13148 13149 if (IS_PF(bp)) { 13150 if (!pdev->pm_cap) { 13151 dev_err(&bp->pdev->dev, 13152 "Cannot find power management capability, aborting\n"); 13153 rc = -EIO; 13154 goto err_out_release; 13155 } 13156 } 13157 13158 if (!pci_is_pcie(pdev)) { 13159 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); 13160 rc = -EIO; 13161 goto err_out_release; 13162 } 13163 13164 rc = bnx2x_set_coherency_mask(bp); 13165 if (rc) 13166 goto err_out_release; 13167 13168 dev->mem_start = pci_resource_start(pdev, 0); 13169 dev->base_addr = dev->mem_start; 13170 dev->mem_end = pci_resource_end(pdev, 0); 13171 13172 dev->irq = pdev->irq; 13173 13174 bp->regview = pci_ioremap_bar(pdev, 0); 13175 if (!bp->regview) { 13176 dev_err(&bp->pdev->dev, 13177 "Cannot map register space, aborting\n"); 13178 rc = -ENOMEM; 13179 goto err_out_release; 13180 } 13181 13182 /* In E1/E1H use pci device function given by kernel. 13183 * In E2/E3 read physical function from ME register since these chips 13184 * support Physical Device Assignment where kernel BDF maybe arbitrary 13185 * (depending on hypervisor). 13186 */ 13187 if (chip_is_e1x) { 13188 bp->pf_num = PCI_FUNC(pdev->devfn); 13189 } else { 13190 /* chip is E2/3*/ 13191 pci_read_config_dword(bp->pdev, 13192 PCICFG_ME_REGISTER, &pci_cfg_dword); 13193 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> 13194 ME_REG_ABS_PF_NUM_SHIFT); 13195 } 13196 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); 13197 13198 /* clean indirect addresses */ 13199 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 13200 PCICFG_VENDOR_ID_OFFSET); 13201 13202 /* Set PCIe reset type to fundamental for EEH recovery */ 13203 pdev->needs_freset = 1; 13204 13205 /* AER (Advanced Error reporting) configuration */ 13206 rc = pci_enable_pcie_error_reporting(pdev); 13207 if (!rc) 13208 bp->flags |= AER_ENABLED; 13209 else 13210 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc); 13211 13212 /* 13213 * Clean the following indirect addresses for all functions since it 13214 * is not used by the driver. 
13215 */ 13216 if (IS_PF(bp)) { 13217 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 13218 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 13219 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 13220 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 13221 13222 if (chip_is_e1x) { 13223 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 13224 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 13225 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 13226 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 13227 } 13228 13229 /* Enable internal target-read (in case we are probed after PF 13230 * FLR). Must be done prior to any BAR read access. Only for 13231 * 57712 and up 13232 */ 13233 if (!chip_is_e1x) 13234 REG_WR(bp, 13235 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 13236 } 13237 13238 dev->watchdog_timeo = TX_TIMEOUT; 13239 13240 dev->netdev_ops = &bnx2x_netdev_ops; 13241 bnx2x_set_ethtool_ops(bp, dev); 13242 13243 dev->priv_flags |= IFF_UNICAST_FLT; 13244 13245 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 13246 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 13247 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW | 13248 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; 13249 if (!chip_is_e1x) { 13250 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | 13251 NETIF_F_GSO_IPXIP4 | 13252 NETIF_F_GSO_UDP_TUNNEL | 13253 NETIF_F_GSO_UDP_TUNNEL_CSUM | 13254 NETIF_F_GSO_PARTIAL; 13255 13256 dev->hw_enc_features = 13257 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 13258 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 13259 NETIF_F_GSO_IPXIP4 | 13260 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | 13261 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | 13262 NETIF_F_GSO_PARTIAL; 13263 13264 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM | 13265 NETIF_F_GSO_UDP_TUNNEL_CSUM; 13266 13267 if (IS_PF(bp)) 13268 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels; 13269 } 13270 13271 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 13272 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 13273 13274 if (IS_PF(bp)) { 13275 if (chip_is_e1x) 13276 bp->accept_any_vlan = true; 13277 else 13278 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13279 } 13280 /* For VF we'll know whether to enable VLAN filtering after 13281 * getting a response to CHANNEL_TLV_ACQUIRE from PF. 
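 * (The PF advertises this via PFVF_CAP_VLAN_FILTER in the acquire response; see the CONFIG_BNX2X_SRIOV block in bnx2x_init_one(), where NETIF_F_HW_VLAN_CTAG_FILTER is enabled for capable VFs.)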
13282 */ 13283 13284 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 13285 dev->features |= NETIF_F_HIGHDMA; 13286 if (dev->features & NETIF_F_LRO) 13287 dev->features &= ~NETIF_F_GRO_HW; 13288 13289 /* Add Loopback capability to the device */ 13290 dev->hw_features |= NETIF_F_LOOPBACK; 13291 13292 #ifdef BCM_DCBNL 13293 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 13294 #endif 13295 13296 /* MTU range, 46 - 9600 */ 13297 dev->min_mtu = ETH_MIN_PACKET_SIZE; 13298 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE; 13299 13300 /* get_port_hwinfo() will set prtad and mmds properly */ 13301 bp->mdio.prtad = MDIO_PRTAD_NONE; 13302 bp->mdio.mmds = 0; 13303 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 13304 bp->mdio.dev = dev; 13305 bp->mdio.mdio_read = bnx2x_mdio_read; 13306 bp->mdio.mdio_write = bnx2x_mdio_write; 13307 13308 return 0; 13309 13310 err_out_release: 13311 if (atomic_read(&pdev->enable_cnt) == 1) 13312 pci_release_regions(pdev); 13313 13314 err_out_disable: 13315 pci_disable_device(pdev); 13316 13317 err_out: 13318 return rc; 13319 } 13320 13321 static int bnx2x_check_firmware(struct bnx2x *bp) 13322 { 13323 const struct firmware *firmware = bp->firmware; 13324 struct bnx2x_fw_file_hdr *fw_hdr; 13325 struct bnx2x_fw_file_section *sections; 13326 u32 offset, len, num_ops; 13327 __be16 *ops_offsets; 13328 int i; 13329 const u8 *fw_ver; 13330 13331 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { 13332 BNX2X_ERR("Wrong FW size\n"); 13333 return -EINVAL; 13334 } 13335 13336 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; 13337 sections = (struct bnx2x_fw_file_section *)fw_hdr; 13338 13339 /* Make sure none of the offsets and sizes make us read beyond 13340 * the end of the firmware data */ 13341 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { 13342 offset = be32_to_cpu(sections[i].offset); 13343 len = be32_to_cpu(sections[i].len); 13344 if (offset + len > firmware->size) { 13345 BNX2X_ERR("Section %d length is out of bounds\n", i); 13346 return -EINVAL; 13347 } 13348 } 13349 13350 /* Likewise for the init_ops offsets */ 13351 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); 13352 ops_offsets = (__force __be16 *)(firmware->data + offset); 13353 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); 13354 13355 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 13356 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 13357 BNX2X_ERR("Section offset %d is out of bounds\n", i); 13358 return -EINVAL; 13359 } 13360 } 13361 13362 /* Check FW version */ 13363 offset = be32_to_cpu(fw_hdr->fw_version.offset); 13364 fw_ver = firmware->data + offset; 13365 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || 13366 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || 13367 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || 13368 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { 13369 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. 
Should be %d.%d.%d.%d\n", 13370 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], 13371 BCM_5710_FW_MAJOR_VERSION, 13372 BCM_5710_FW_MINOR_VERSION, 13373 BCM_5710_FW_REVISION_VERSION, 13374 BCM_5710_FW_ENGINEERING_VERSION); 13375 return -EINVAL; 13376 } 13377 13378 return 0; 13379 } 13380 13381 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 13382 { 13383 const __be32 *source = (const __be32 *)_source; 13384 u32 *target = (u32 *)_target; 13385 u32 i; 13386 13387 for (i = 0; i < n/4; i++) 13388 target[i] = be32_to_cpu(source[i]); 13389 } 13390 13391 /* 13392 Ops array is stored in the following format: 13393 {op(8bit), offset(24bit, big endian), data(32bit, big endian)} 13394 */ 13395 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) 13396 { 13397 const __be32 *source = (const __be32 *)_source; 13398 struct raw_op *target = (struct raw_op *)_target; 13399 u32 i, j, tmp; 13400 13401 for (i = 0, j = 0; i < n/8; i++, j += 2) { 13402 tmp = be32_to_cpu(source[j]); 13403 target[i].op = (tmp >> 24) & 0xff; 13404 target[i].offset = tmp & 0xffffff; 13405 target[i].raw_data = be32_to_cpu(source[j + 1]); 13406 } 13407 } 13408 13409 /* IRO array is stored in the following format: 13410 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 13411 */ 13412 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 13413 { 13414 const __be32 *source = (const __be32 *)_source; 13415 struct iro *target = (struct iro *)_target; 13416 u32 i, j, tmp; 13417 13418 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { 13419 target[i].base = be32_to_cpu(source[j]); 13420 j++; 13421 tmp = be32_to_cpu(source[j]); 13422 target[i].m1 = (tmp >> 16) & 0xffff; 13423 target[i].m2 = tmp & 0xffff; 13424 j++; 13425 tmp = be32_to_cpu(source[j]); 13426 target[i].m3 = (tmp >> 16) & 0xffff; 13427 target[i].size = tmp & 0xffff; 13428 j++; 13429 } 13430 } 13431 13432 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 13433 { 13434 const __be16 *source = (const __be16 *)_source; 13435 u16 *target = (u16 *)_target; 13436 u32 i; 13437 13438 for (i = 0; i < n/2; i++) 13439 target[i] = be16_to_cpu(source[i]); 13440 } 13441 13442 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ 13443 do { \ 13444 u32 len = be32_to_cpu(fw_hdr->arr.len); \ 13445 bp->arr = kmalloc(len, GFP_KERNEL); \ 13446 if (!bp->arr) \ 13447 goto lbl; \ 13448 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ 13449 (u8 *)bp->arr, len); \ 13450 } while (0) 13451 13452 static int bnx2x_init_firmware(struct bnx2x *bp) 13453 { 13454 const char *fw_file_name; 13455 struct bnx2x_fw_file_hdr *fw_hdr; 13456 int rc; 13457 13458 if (bp->firmware) 13459 return 0; 13460 13461 if (CHIP_IS_E1(bp)) 13462 fw_file_name = FW_FILE_NAME_E1; 13463 else if (CHIP_IS_E1H(bp)) 13464 fw_file_name = FW_FILE_NAME_E1H; 13465 else if (!CHIP_IS_E1x(bp)) 13466 fw_file_name = FW_FILE_NAME_E2; 13467 else { 13468 BNX2X_ERR("Unsupported chip revision\n"); 13469 return -EINVAL; 13470 } 13471 BNX2X_DEV_INFO("Loading %s\n", fw_file_name); 13472 13473 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); 13474 if (rc) { 13475 BNX2X_ERR("Can't load firmware file %s\n", 13476 fw_file_name); 13477 goto request_firmware_exit; 13478 } 13479 13480 rc = bnx2x_check_firmware(bp); 13481 if (rc) { 13482 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); 13483 goto request_firmware_exit; 13484 } 13485 13486 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; 13487 13488 /* Initialize the pointers to the init arrays */ 13489 /* Blob */ 
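/* Each BNX2X_ALLOC_AND_SET() below allocates a host buffer for one firmware file section and converts it from the big-endian on-file layout to host order using the helper passed as the third argument; rc is preset to -ENOMEM so a failed allocation inside the macro unwinds through the matching error label with the right error code. */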
13490 rc = -ENOMEM; 13491 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); 13492 13493 /* Opcodes */ 13494 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); 13495 13496 /* Offsets */ 13497 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, 13498 be16_to_cpu_n); 13499 13500 /* STORMs firmware */ 13501 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13502 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); 13503 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + 13504 be32_to_cpu(fw_hdr->tsem_pram_data.offset); 13505 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13506 be32_to_cpu(fw_hdr->usem_int_table_data.offset); 13507 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + 13508 be32_to_cpu(fw_hdr->usem_pram_data.offset); 13509 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13510 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); 13511 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + 13512 be32_to_cpu(fw_hdr->xsem_pram_data.offset); 13513 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13514 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 13515 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 13516 be32_to_cpu(fw_hdr->csem_pram_data.offset); 13517 /* IRO */ 13518 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); 13519 13520 return 0; 13521 13522 iro_alloc_err: 13523 kfree(bp->init_ops_offsets); 13524 init_offsets_alloc_err: 13525 kfree(bp->init_ops); 13526 init_ops_alloc_err: 13527 kfree(bp->init_data); 13528 request_firmware_exit: 13529 release_firmware(bp->firmware); 13530 bp->firmware = NULL; 13531 13532 return rc; 13533 } 13534 13535 static void bnx2x_release_firmware(struct bnx2x *bp) 13536 { 13537 kfree(bp->init_ops_offsets); 13538 kfree(bp->init_ops); 13539 kfree(bp->init_data); 13540 release_firmware(bp->firmware); 13541 bp->firmware = NULL; 13542 } 13543 13544 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { 13545 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 13546 .init_hw_cmn = bnx2x_init_hw_common, 13547 .init_hw_port = bnx2x_init_hw_port, 13548 .init_hw_func = bnx2x_init_hw_func, 13549 13550 .reset_hw_cmn = bnx2x_reset_common, 13551 .reset_hw_port = bnx2x_reset_port, 13552 .reset_hw_func = bnx2x_reset_func, 13553 13554 .gunzip_init = bnx2x_gunzip_init, 13555 .gunzip_end = bnx2x_gunzip_end, 13556 13557 .init_fw = bnx2x_init_firmware, 13558 .release_fw = bnx2x_release_firmware, 13559 }; 13560 13561 void bnx2x__init_func_obj(struct bnx2x *bp) 13562 { 13563 /* Prepare DMAE related driver resources */ 13564 bnx2x_setup_dmae(bp); 13565 13566 bnx2x_init_func_obj(bp, &bp->func_obj, 13567 bnx2x_sp(bp, func_rdata), 13568 bnx2x_sp_mapping(bp, func_rdata), 13569 bnx2x_sp(bp, func_afex_rdata), 13570 bnx2x_sp_mapping(bp, func_afex_rdata), 13571 &bnx2x_func_sp_drv); 13572 } 13573 13574 /* must be called after sriov-enable */ 13575 static int bnx2x_set_qm_cid_count(struct bnx2x *bp) 13576 { 13577 int cid_count = BNX2X_L2_MAX_CID(bp); 13578 13579 if (IS_SRIOV(bp)) 13580 cid_count += BNX2X_VF_CIDS; 13581 13582 if (CNIC_SUPPORT(bp)) 13583 cid_count += CNIC_CID_MAX; 13584 13585 return roundup(cid_count, QM_CID_ROUND); 13586 } 13587 13588 /** 13589 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs 13590 * @pdev: pci device 13591 * @cnic_cnt: number of status blocks reserved for CNIC 13592 * 13593 */ 13594 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) 13595 { 13596 int index; 13597 u16 control = 0; 13598 13599 /* 13600 * If MSI-X is not supported - return number of SBs needed to support 13601 * one fast path queue: one
FP queue + SB for CNIC 13602 */ 13603 if (!pdev->msix_cap) { 13604 dev_info(&pdev->dev, "no msix capability found\n"); 13605 return 1 + cnic_cnt; 13606 } 13607 dev_info(&pdev->dev, "msix capability found\n"); 13608 13609 /* 13610 * The value in the PCI configuration space is the index of the last 13611 * entry, namely one less than the actual size of the table, which is 13612 * exactly what we want to return from this function: number of all SBs 13613 * without the default SB. 13614 * For VFs there is no default SB, then we return (index+1). 13615 */ 13616 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); 13617 13618 index = control & PCI_MSIX_FLAGS_QSIZE; 13619 13620 return index; 13621 } 13622 13623 static int set_max_cos_est(int chip_id) 13624 { 13625 switch (chip_id) { 13626 case BCM57710: 13627 case BCM57711: 13628 case BCM57711E: 13629 return BNX2X_MULTI_TX_COS_E1X; 13630 case BCM57712: 13631 case BCM57712_MF: 13632 return BNX2X_MULTI_TX_COS_E2_E3A0; 13633 case BCM57800: 13634 case BCM57800_MF: 13635 case BCM57810: 13636 case BCM57810_MF: 13637 case BCM57840_4_10: 13638 case BCM57840_2_20: 13639 case BCM57840_O: 13640 case BCM57840_MFO: 13641 case BCM57840_MF: 13642 case BCM57811: 13643 case BCM57811_MF: 13644 return BNX2X_MULTI_TX_COS_E3B0; 13645 case BCM57712_VF: 13646 case BCM57800_VF: 13647 case BCM57810_VF: 13648 case BCM57840_VF: 13649 case BCM57811_VF: 13650 return 1; 13651 default: 13652 pr_err("Unknown board_type (%d), aborting\n", chip_id); 13653 return -ENODEV; 13654 } 13655 } 13656 13657 static int set_is_vf(int chip_id) 13658 { 13659 switch (chip_id) { 13660 case BCM57712_VF: 13661 case BCM57800_VF: 13662 case BCM57810_VF: 13663 case BCM57840_VF: 13664 case BCM57811_VF: 13665 return true; 13666 default: 13667 return false; 13668 } 13669 } 13670 13671 /* nig_tsgen registers relative address */ 13672 #define tsgen_ctrl 0x0 13673 #define tsgen_freecount 0x10 13674 #define tsgen_synctime_t0 0x20 13675 #define tsgen_offset_t0 0x28 13676 #define tsgen_drift_t0 0x30 13677 #define tsgen_synctime_t1 0x58 13678 #define tsgen_offset_t1 0x60 13679 #define tsgen_drift_t1 0x68 13680 13681 /* FW workaround for setting drift */ 13682 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, 13683 int best_val, int best_period) 13684 { 13685 struct bnx2x_func_state_params func_params = {NULL}; 13686 struct bnx2x_func_set_timesync_params *set_timesync_params = 13687 &func_params.params.set_timesync; 13688 13689 /* Prepare parameters for function state transitions */ 13690 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 13691 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 13692 13693 func_params.f_obj = &bp->func_obj; 13694 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; 13695 13696 /* Function parameters */ 13697 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; 13698 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; 13699 set_timesync_params->add_sub_drift_adjust_value = 13700 drift_dir ? 
TS_ADD_VALUE : TS_SUB_VALUE; 13701 set_timesync_params->drift_adjust_value = best_val; 13702 set_timesync_params->drift_adjust_period = best_period; 13703 13704 return bnx2x_func_state_change(bp, &func_params); 13705 } 13706 13707 static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 13708 { 13709 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13710 int rc; 13711 int drift_dir = 1; 13712 int val, period, period1, period2, dif, dif1, dif2; 13713 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; 13714 13715 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); 13716 13717 if (!netif_running(bp->dev)) { 13718 DP(BNX2X_MSG_PTP, 13719 "PTP adjfreq called while the interface is down\n"); 13720 return -ENETDOWN; 13721 } 13722 13723 if (ppb < 0) { 13724 ppb = -ppb; 13725 drift_dir = 0; 13726 } 13727 13728 if (ppb == 0) { 13729 best_val = 1; 13730 best_period = 0x1FFFFFF; 13731 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) { 13732 best_val = 31; 13733 best_period = 1; 13734 } else { 13735 /* Changed not to allow val = 8, 16, 24 as these values 13736 * are not supported in workaround. 13737 */ 13738 for (val = 0; val <= 31; val++) { 13739 if ((val & 0x7) == 0) 13740 continue; 13741 period1 = val * 1000000 / ppb; 13742 period2 = period1 + 1; 13743 if (period1 != 0) 13744 dif1 = ppb - (val * 1000000 / period1); 13745 else 13746 dif1 = BNX2X_MAX_PHC_DRIFT; 13747 if (dif1 < 0) 13748 dif1 = -dif1; 13749 dif2 = ppb - (val * 1000000 / period2); 13750 if (dif2 < 0) 13751 dif2 = -dif2; 13752 dif = (dif1 < dif2) ? dif1 : dif2; 13753 period = (dif1 < dif2) ? period1 : period2; 13754 if (dif < best_dif) { 13755 best_dif = dif; 13756 best_val = val; 13757 best_period = period; 13758 } 13759 } 13760 } 13761 13762 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, 13763 best_period); 13764 if (rc) { 13765 BNX2X_ERR("Failed to set drift\n"); 13766 return -EFAULT; 13767 } 13768 13769 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, 13770 best_period); 13771 13772 return 0; 13773 } 13774 13775 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 13776 { 13777 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13778 13779 if (!netif_running(bp->dev)) { 13780 DP(BNX2X_MSG_PTP, 13781 "PTP adjtime called while the interface is down\n"); 13782 return -ENETDOWN; 13783 } 13784 13785 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); 13786 13787 timecounter_adjtime(&bp->timecounter, delta); 13788 13789 return 0; 13790 } 13791 13792 static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) 13793 { 13794 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13795 u64 ns; 13796 13797 if (!netif_running(bp->dev)) { 13798 DP(BNX2X_MSG_PTP, 13799 "PTP gettime called while the interface is down\n"); 13800 return -ENETDOWN; 13801 } 13802 13803 ns = timecounter_read(&bp->timecounter); 13804 13805 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); 13806 13807 *ts = ns_to_timespec64(ns); 13808 13809 return 0; 13810 } 13811 13812 static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, 13813 const struct timespec64 *ts) 13814 { 13815 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13816 u64 ns; 13817 13818 if (!netif_running(bp->dev)) { 13819 DP(BNX2X_MSG_PTP, 13820 "PTP settime called while the interface is down\n"); 13821 return -ENETDOWN; 13822 } 13823 13824 ns = timespec64_to_ns(ts); 13825 13826 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); 
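/* Unlike bnx2x_ptp_adjtime() above, which nudges the existing time base by a delta, settime discards the accumulated state and restarts the timecounter at the requested absolute value. */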
13827 13828 /* Re-init the timecounter */ 13829 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); 13830 13831 return 0; 13832 } 13833 13834 /* Enable (or disable) ancillary features of the phc subsystem */ 13835 static int bnx2x_ptp_enable(struct ptp_clock_info *ptp, 13836 struct ptp_clock_request *rq, int on) 13837 { 13838 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13839 13840 BNX2X_ERR("PHC ancillary features are not supported\n"); 13841 return -ENOTSUPP; 13842 } 13843 13844 void bnx2x_register_phc(struct bnx2x *bp) 13845 { 13846 /* Fill the ptp_clock_info struct and register PTP clock*/ 13847 bp->ptp_clock_info.owner = THIS_MODULE; 13848 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); 13849 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ 13850 bp->ptp_clock_info.n_alarm = 0; 13851 bp->ptp_clock_info.n_ext_ts = 0; 13852 bp->ptp_clock_info.n_per_out = 0; 13853 bp->ptp_clock_info.pps = 0; 13854 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; 13855 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; 13856 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; 13857 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; 13858 bp->ptp_clock_info.enable = bnx2x_ptp_enable; 13859 13860 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); 13861 if (IS_ERR(bp->ptp_clock)) { 13862 bp->ptp_clock = NULL; 13863 BNX2X_ERR("PTP clock registration failed\n"); 13864 } 13865 } 13866 13867 static int bnx2x_init_one(struct pci_dev *pdev, 13868 const struct pci_device_id *ent) 13869 { 13870 struct net_device *dev = NULL; 13871 struct bnx2x *bp; 13872 int rc, max_non_def_sbs; 13873 int rx_count, tx_count, rss_count, doorbell_size; 13874 int max_cos_est; 13875 bool is_vf; 13876 int cnic_cnt; 13877 13878 /* Management FW 'remembers' living interfaces. Allow it some time 13879 * to forget previously living interfaces, allowing a proper re-load. 13880 */ 13881 if (is_kdump_kernel()) { 13882 ktime_t now = ktime_get_boottime(); 13883 ktime_t fw_ready_time = ktime_set(5, 0); 13884 13885 if (ktime_before(now, fw_ready_time)) 13886 msleep(ktime_ms_delta(fw_ready_time, now)); 13887 } 13888 13889 /* An estimated maximum supported CoS number according to the chip 13890 * version. 13891 * We will try to roughly estimate the maximum number of CoSes this chip 13892 * may support in order to minimize the memory allocated for Tx 13893 * netdev_queue's. This number will be accurately calculated during the 13894 * initialization of bp->max_cos based on the chip versions AND chip 13895 * revision in the bnx2x_init_bp(). 13896 */ 13897 max_cos_est = set_max_cos_est(ent->driver_data); 13898 if (max_cos_est < 0) 13899 return max_cos_est; 13900 is_vf = set_is_vf(ent->driver_data); 13901 cnic_cnt = is_vf ? 0 : 1; 13902 13903 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); 13904 13905 /* add another SB for VF as it has no default SB */ 13906 max_non_def_sbs += is_vf ? 
1 : 0; 13907 13908 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 13909 rss_count = max_non_def_sbs - cnic_cnt; 13910 13911 if (rss_count < 1) 13912 return -EINVAL; 13913 13914 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ 13915 rx_count = rss_count + cnic_cnt; 13916 13917 /* Maximum number of netdev Tx queues: 13918 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 13919 */ 13920 tx_count = rss_count * max_cos_est + cnic_cnt; 13921 13922 /* dev zeroed in init_etherdev */ 13923 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 13924 if (!dev) 13925 return -ENOMEM; 13926 13927 bp = netdev_priv(dev); 13928 13929 bp->flags = 0; 13930 if (is_vf) 13931 bp->flags |= IS_VF_FLAG; 13932 13933 bp->igu_sb_cnt = max_non_def_sbs; 13934 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; 13935 bp->msg_enable = debug; 13936 bp->cnic_support = cnic_cnt; 13937 bp->cnic_probe = bnx2x_cnic_probe; 13938 13939 pci_set_drvdata(pdev, dev); 13940 13941 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); 13942 if (rc < 0) { 13943 free_netdev(dev); 13944 return rc; 13945 } 13946 13947 BNX2X_DEV_INFO("This is a %s function\n", 13948 IS_PF(bp) ? "physical" : "virtual"); 13949 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off"); 13950 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs); 13951 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", 13952 tx_count, rx_count); 13953 13954 rc = bnx2x_init_bp(bp); 13955 if (rc) 13956 goto init_one_exit; 13957 13958 /* Map doorbells here as we need the real value of bp->max_cos which 13959 * is initialized in bnx2x_init_bp() to determine the number of 13960 * l2 connections. 13961 */ 13962 if (IS_VF(bp)) { 13963 bp->doorbells = bnx2x_vf_doorbells(bp); 13964 rc = bnx2x_vf_pci_alloc(bp); 13965 if (rc) 13966 goto init_one_freemem; 13967 } else { 13968 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); 13969 if (doorbell_size > pci_resource_len(pdev, 2)) { 13970 dev_err(&bp->pdev->dev, 13971 "Cannot map doorbells, bar size too small, aborting\n"); 13972 rc = -ENOMEM; 13973 goto init_one_freemem; 13974 } 13975 bp->doorbells = ioremap(pci_resource_start(pdev, 2), 13976 doorbell_size); 13977 } 13978 if (!bp->doorbells) { 13979 dev_err(&bp->pdev->dev, 13980 "Cannot map doorbell space, aborting\n"); 13981 rc = -ENOMEM; 13982 goto init_one_freemem; 13983 } 13984 13985 if (IS_VF(bp)) { 13986 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); 13987 if (rc) 13988 goto init_one_freemem; 13989 13990 #ifdef CONFIG_BNX2X_SRIOV 13991 /* VF with OLD Hypervisor or old PF do not support filtering */ 13992 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { 13993 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13994 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13995 } 13996 #endif 13997 } 13998 13999 /* Enable SRIOV if capability found in configuration space */ 14000 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); 14001 if (rc) 14002 goto init_one_freemem; 14003 14004 /* calc qm_cid_count */ 14005 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); 14006 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); 14007 14008 /* disable FCOE L2 queue for E1x*/ 14009 if (CHIP_IS_E1x(bp)) 14010 bp->flags |= NO_FCOE_FLAG; 14011 14012 /* Set bp->num_queues for MSI-X mode*/ 14013 bnx2x_set_num_queues(bp); 14014 14015 /* Configure interrupt mode: try to enable MSI-X/MSI if 14016 * needed. 
14017 */ 14018 rc = bnx2x_set_int_mode(bp); 14019 if (rc) { 14020 dev_err(&pdev->dev, "Cannot set interrupts\n"); 14021 goto init_one_freemem; 14022 } 14023 BNX2X_DEV_INFO("set interrupts successfully\n"); 14024 14025 /* register the net device */ 14026 rc = register_netdev(dev); 14027 if (rc) { 14028 dev_err(&pdev->dev, "Cannot register net device\n"); 14029 goto init_one_freemem; 14030 } 14031 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); 14032 14033 if (!NO_FCOE(bp)) { 14034 /* Add storage MAC address */ 14035 rtnl_lock(); 14036 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 14037 rtnl_unlock(); 14038 } 14039 BNX2X_DEV_INFO( 14040 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n", 14041 board_info[ent->driver_data].name, 14042 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 14043 dev->base_addr, bp->pdev->irq, dev->dev_addr); 14044 pcie_print_link_status(bp->pdev); 14045 14046 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) 14047 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); 14048 14049 return 0; 14050 14051 init_one_freemem: 14052 bnx2x_free_mem_bp(bp); 14053 14054 init_one_exit: 14055 bnx2x_disable_pcie_error_reporting(bp); 14056 14057 if (bp->regview) 14058 iounmap(bp->regview); 14059 14060 if (IS_PF(bp) && bp->doorbells) 14061 iounmap(bp->doorbells); 14062 14063 free_netdev(dev); 14064 14065 if (atomic_read(&pdev->enable_cnt) == 1) 14066 pci_release_regions(pdev); 14067 14068 pci_disable_device(pdev); 14069 14070 return rc; 14071 } 14072 14073 static void __bnx2x_remove(struct pci_dev *pdev, 14074 struct net_device *dev, 14075 struct bnx2x *bp, 14076 bool remove_netdev) 14077 { 14078 /* Delete storage MAC address */ 14079 if (!NO_FCOE(bp)) { 14080 rtnl_lock(); 14081 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 14082 rtnl_unlock(); 14083 } 14084 14085 #ifdef BCM_DCBNL 14086 /* Delete app tlvs from dcbnl */ 14087 bnx2x_dcbnl_update_applist(bp, true); 14088 #endif 14089 14090 if (IS_PF(bp) && 14091 !BP_NOMCP(bp) && 14092 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) 14093 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); 14094 14095 /* Close the interface - either directly or implicitly */ 14096 if (remove_netdev) { 14097 unregister_netdev(dev); 14098 } else { 14099 rtnl_lock(); 14100 dev_close(dev); 14101 rtnl_unlock(); 14102 } 14103 14104 bnx2x_iov_remove_one(bp); 14105 14106 /* Power on: we can't let PCI layer write to us while we are in D3 */ 14107 if (IS_PF(bp)) { 14108 bnx2x_set_power_state(bp, PCI_D0); 14109 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED); 14110 14111 /* Set endianness registers to reset values in case the next driver 14112 * boots in a different endianness environment.
14113 */ 14114 bnx2x_reset_endianity(bp); 14115 } 14116 14117 /* Disable MSI/MSI-X */ 14118 bnx2x_disable_msi(bp); 14119 14120 /* Power off */ 14121 if (IS_PF(bp)) 14122 bnx2x_set_power_state(bp, PCI_D3hot); 14123 14124 /* Make sure RESET task is not scheduled before continuing */ 14125 cancel_delayed_work_sync(&bp->sp_rtnl_task); 14126 14127 /* send message via vfpf channel to release the resources of this vf */ 14128 if (IS_VF(bp)) 14129 bnx2x_vfpf_release(bp); 14130 14131 /* Assumes no further PCIe PM changes will occur */ 14132 if (system_state == SYSTEM_POWER_OFF) { 14133 pci_wake_from_d3(pdev, bp->wol); 14134 pci_set_power_state(pdev, PCI_D3hot); 14135 } 14136 14137 bnx2x_disable_pcie_error_reporting(bp); 14138 if (remove_netdev) { 14139 if (bp->regview) 14140 iounmap(bp->regview); 14141 14142 /* For vfs, doorbells are part of the regview and were unmapped 14143 * along with it. FW is only loaded by PF. 14144 */ 14145 if (IS_PF(bp)) { 14146 if (bp->doorbells) 14147 iounmap(bp->doorbells); 14148 14149 bnx2x_release_firmware(bp); 14150 } else { 14151 bnx2x_vf_pci_dealloc(bp); 14152 } 14153 bnx2x_free_mem_bp(bp); 14154 14155 free_netdev(dev); 14156 14157 if (atomic_read(&pdev->enable_cnt) == 1) 14158 pci_release_regions(pdev); 14159 14160 pci_disable_device(pdev); 14161 } 14162 } 14163 14164 static void bnx2x_remove_one(struct pci_dev *pdev) 14165 { 14166 struct net_device *dev = pci_get_drvdata(pdev); 14167 struct bnx2x *bp; 14168 14169 if (!dev) { 14170 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 14171 return; 14172 } 14173 bp = netdev_priv(dev); 14174 14175 __bnx2x_remove(pdev, dev, bp, true); 14176 } 14177 14178 static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 14179 { 14180 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 14181 14182 bp->rx_mode = BNX2X_RX_MODE_NONE; 14183 14184 if (CNIC_LOADED(bp)) 14185 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 14186 14187 /* Stop Tx */ 14188 bnx2x_tx_disable(bp); 14189 /* Delete all NAPI objects */ 14190 bnx2x_del_all_napi(bp); 14191 if (CNIC_LOADED(bp)) 14192 bnx2x_del_all_napi_cnic(bp); 14193 netdev_reset_tc(bp->dev); 14194 14195 del_timer_sync(&bp->timer); 14196 cancel_delayed_work_sync(&bp->sp_task); 14197 cancel_delayed_work_sync(&bp->period_task); 14198 14199 if (!down_timeout(&bp->stats_lock, HZ / 10)) { 14200 bp->stats_state = STATS_STATE_DISABLED; 14201 up(&bp->stats_lock); 14202 } 14203 14204 bnx2x_save_statistics(bp); 14205 14206 netif_carrier_off(bp->dev); 14207 14208 return 0; 14209 } 14210 14211 /** 14212 * bnx2x_io_error_detected - called when PCI error is detected 14213 * @pdev: Pointer to PCI device 14214 * @state: The current pci connection state 14215 * 14216 * This function is called after a PCI bus error affecting 14217 * this device has been detected. 
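 * It detaches the net device, unloads the NIC state if the interface was running, and then asks the PCI core for a slot reset by returning PCI_ERS_RESULT_NEED_RESET (or PCI_ERS_RESULT_DISCONNECT when the channel has failed permanently).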
14218 */ 14219 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, 14220 pci_channel_state_t state) 14221 { 14222 struct net_device *dev = pci_get_drvdata(pdev); 14223 struct bnx2x *bp = netdev_priv(dev); 14224 14225 rtnl_lock(); 14226 14227 BNX2X_ERR("IO error detected\n"); 14228 14229 netif_device_detach(dev); 14230 14231 if (state == pci_channel_io_perm_failure) { 14232 rtnl_unlock(); 14233 return PCI_ERS_RESULT_DISCONNECT; 14234 } 14235 14236 if (netif_running(dev)) 14237 bnx2x_eeh_nic_unload(bp); 14238 14239 bnx2x_prev_path_mark_eeh(bp); 14240 14241 pci_disable_device(pdev); 14242 14243 rtnl_unlock(); 14244 14245 /* Request a slot reset */ 14246 return PCI_ERS_RESULT_NEED_RESET; 14247 } 14248 14249 /** 14250 * bnx2x_io_slot_reset - called after the PCI bus has been reset 14251 * @pdev: Pointer to PCI device 14252 * 14253 * Restart the card from scratch, as if from a cold-boot. 14254 */ 14255 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) 14256 { 14257 struct net_device *dev = pci_get_drvdata(pdev); 14258 struct bnx2x *bp = netdev_priv(dev); 14259 int i; 14260 14261 rtnl_lock(); 14262 BNX2X_ERR("IO slot reset initializing...\n"); 14263 if (pci_enable_device(pdev)) { 14264 dev_err(&pdev->dev, 14265 "Cannot re-enable PCI device after reset\n"); 14266 rtnl_unlock(); 14267 return PCI_ERS_RESULT_DISCONNECT; 14268 } 14269 14270 pci_set_master(pdev); 14271 pci_restore_state(pdev); 14272 pci_save_state(pdev); 14273 14274 if (netif_running(dev)) 14275 bnx2x_set_power_state(bp, PCI_D0); 14276 14277 if (netif_running(dev)) { 14278 BNX2X_ERR("IO slot reset --> driver unload\n"); 14279 14280 /* MCP should have been reset; need to wait for validity */ 14281 if (bnx2x_init_shmem(bp)) { 14282 rtnl_unlock(); 14283 return PCI_ERS_RESULT_DISCONNECT; 14284 } 14285 14286 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 14287 u32 v; 14288 14289 v = SHMEM2_RD(bp, 14290 drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 14291 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 14292 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 14293 } 14294 bnx2x_drain_tx_queues(bp); 14295 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); 14296 bnx2x_netif_stop(bp, 1); 14297 bnx2x_free_irq(bp); 14298 14299 /* Report UNLOAD_DONE to MCP */ 14300 bnx2x_send_unload_done(bp, true); 14301 14302 bp->sp_state = 0; 14303 bp->port.pmf = 0; 14304 14305 bnx2x_prev_unload(bp); 14306 14307 /* We should have reset the engine, so it's fair to 14308 * assume the FW will no longer write to the bnx2x driver. 14309 */ 14310 bnx2x_squeeze_objects(bp); 14311 bnx2x_free_skbs(bp); 14312 for_each_rx_queue(bp, i) 14313 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 14314 bnx2x_free_fp_mem(bp); 14315 bnx2x_free_mem(bp); 14316 14317 bp->state = BNX2X_STATE_CLOSED; 14318 } 14319 14320 rtnl_unlock(); 14321 14322 return PCI_ERS_RESULT_RECOVERED; 14323 } 14324 14325 /** 14326 * bnx2x_io_resume - called when traffic can start flowing again 14327 * @pdev: Pointer to PCI device 14328 * 14329 * This callback is called when the error recovery driver tells us that 14330 * it's OK to resume normal operation. 14331 */ 14332 static void bnx2x_io_resume(struct pci_dev *pdev) 14333 { 14334 struct net_device *dev = pci_get_drvdata(pdev); 14335 struct bnx2x *bp = netdev_priv(dev); 14336 14337 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 14338 netdev_err(bp->dev, "Handling parity error recovery.
Try again later\n"); 14339 return; 14340 } 14341 14342 rtnl_lock(); 14343 14344 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 14345 DRV_MSG_SEQ_NUMBER_MASK; 14346 14347 if (netif_running(dev)) 14348 bnx2x_nic_load(bp, LOAD_NORMAL); 14349 14350 netif_device_attach(dev); 14351 14352 rtnl_unlock(); 14353 } 14354 14355 static const struct pci_error_handlers bnx2x_err_handler = { 14356 .error_detected = bnx2x_io_error_detected, 14357 .slot_reset = bnx2x_io_slot_reset, 14358 .resume = bnx2x_io_resume, 14359 }; 14360 14361 static void bnx2x_shutdown(struct pci_dev *pdev) 14362 { 14363 struct net_device *dev = pci_get_drvdata(pdev); 14364 struct bnx2x *bp; 14365 14366 if (!dev) 14367 return; 14368 14369 bp = netdev_priv(dev); 14370 if (!bp) 14371 return; 14372 14373 rtnl_lock(); 14374 netif_device_detach(dev); 14375 rtnl_unlock(); 14376 14377 /* Don't remove the netdevice, as there are scenarios which will cause 14378 * the kernel to hang, e.g., when trying to remove bnx2i while the 14379 * rootfs is mounted from SAN. 14380 */ 14381 __bnx2x_remove(pdev, dev, bp, false); 14382 } 14383 14384 static struct pci_driver bnx2x_pci_driver = { 14385 .name = DRV_MODULE_NAME, 14386 .id_table = bnx2x_pci_tbl, 14387 .probe = bnx2x_init_one, 14388 .remove = bnx2x_remove_one, 14389 .driver.pm = &bnx2x_pm_ops, 14390 .err_handler = &bnx2x_err_handler, 14391 #ifdef CONFIG_BNX2X_SRIOV 14392 .sriov_configure = bnx2x_sriov_configure, 14393 #endif 14394 .shutdown = bnx2x_shutdown, 14395 }; 14396 14397 static int __init bnx2x_init(void) 14398 { 14399 int ret; 14400 14401 bnx2x_wq = create_singlethread_workqueue("bnx2x"); 14402 if (bnx2x_wq == NULL) { 14403 pr_err("Cannot create workqueue\n"); 14404 return -ENOMEM; 14405 } 14406 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); 14407 if (!bnx2x_iov_wq) { 14408 pr_err("Cannot create iov workqueue\n"); 14409 destroy_workqueue(bnx2x_wq); 14410 return -ENOMEM; 14411 } 14412 14413 ret = pci_register_driver(&bnx2x_pci_driver); 14414 if (ret) { 14415 pr_err("Cannot register driver\n"); 14416 destroy_workqueue(bnx2x_wq); 14417 destroy_workqueue(bnx2x_iov_wq); 14418 } 14419 return ret; 14420 } 14421 14422 static void __exit bnx2x_cleanup(void) 14423 { 14424 struct list_head *pos, *q; 14425 14426 pci_unregister_driver(&bnx2x_pci_driver); 14427 14428 destroy_workqueue(bnx2x_wq); 14429 destroy_workqueue(bnx2x_iov_wq); 14430 14431 /* Free globally allocated resources */ 14432 list_for_each_safe(pos, q, &bnx2x_prev_list) { 14433 struct bnx2x_prev_path_list *tmp = 14434 list_entry(pos, struct bnx2x_prev_path_list, list); 14435 list_del(pos); 14436 kfree(tmp); 14437 } 14438 } 14439 14440 void bnx2x_notify_link_changed(struct bnx2x *bp) 14441 { 14442 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); 14443 } 14444 14445 module_init(bnx2x_init); 14446 module_exit(bnx2x_cleanup); 14447 14448 /** 14449 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). 14450 * @bp: driver handle 14451 * 14452 * This function will wait until the ramrod completion returns. 14453 * Return 0 if success, -ENODEV if ramrod doesn't return. 
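 * (The wait comes from RAMROD_COMP_WAIT being set in ramrod_flags before bnx2x_set_mac_one() is called below.)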
14454 */ 14455 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) 14456 { 14457 unsigned long ramrod_flags = 0; 14458 14459 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 14460 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, 14461 &bp->iscsi_l2_mac_obj, true, 14462 BNX2X_ISCSI_ETH_MAC, &ramrod_flags); 14463 } 14464 14465 /* count denotes the number of new completions we have seen */ 14466 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) 14467 { 14468 struct eth_spe *spe; 14469 int cxt_index, cxt_offset; 14470 14471 #ifdef BNX2X_STOP_ON_ERROR 14472 if (unlikely(bp->panic)) 14473 return; 14474 #endif 14475 14476 spin_lock_bh(&bp->spq_lock); 14477 BUG_ON(bp->cnic_spq_pending < count); 14478 bp->cnic_spq_pending -= count; 14479 14480 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { 14481 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) 14482 & SPE_HDR_CONN_TYPE) >> 14483 SPE_HDR_CONN_TYPE_SHIFT; 14484 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) 14485 >> SPE_HDR_CMD_ID_SHIFT) & 0xff; 14486 14487 /* Set validation for iSCSI L2 client before sending SETUP 14488 * ramrod 14489 */ 14490 if (type == ETH_CONNECTION_TYPE) { 14491 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) { 14492 cxt_index = BNX2X_ISCSI_ETH_CID(bp) / 14493 ILT_PAGE_CIDS; 14494 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - 14495 (cxt_index * ILT_PAGE_CIDS); 14496 bnx2x_set_ctx_validation(bp, 14497 &bp->context[cxt_index]. 14498 vcxt[cxt_offset].eth, 14499 BNX2X_ISCSI_ETH_CID(bp)); 14500 } 14501 } 14502 14503 /* 14504 * There may be not more than 8 L2, not more than 8 L5 SPEs 14505 * and in the air. We also check that number of outstanding 14506 * COMMON ramrods is not more than the EQ and SPQ can 14507 * accommodate. 14508 */ 14509 if (type == ETH_CONNECTION_TYPE) { 14510 if (!atomic_read(&bp->cq_spq_left)) 14511 break; 14512 else 14513 atomic_dec(&bp->cq_spq_left); 14514 } else if (type == NONE_CONNECTION_TYPE) { 14515 if (!atomic_read(&bp->eq_spq_left)) 14516 break; 14517 else 14518 atomic_dec(&bp->eq_spq_left); 14519 } else if ((type == ISCSI_CONNECTION_TYPE) || 14520 (type == FCOE_CONNECTION_TYPE)) { 14521 if (bp->cnic_spq_pending >= 14522 bp->cnic_eth_dev.max_kwqe_pending) 14523 break; 14524 else 14525 bp->cnic_spq_pending++; 14526 } else { 14527 BNX2X_ERR("Unknown SPE type: %d\n", type); 14528 bnx2x_panic(); 14529 break; 14530 } 14531 14532 spe = bnx2x_sp_get_next(bp); 14533 *spe = *bp->cnic_kwq_cons; 14534 14535 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", 14536 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); 14537 14538 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) 14539 bp->cnic_kwq_cons = bp->cnic_kwq; 14540 else 14541 bp->cnic_kwq_cons++; 14542 } 14543 bnx2x_sp_prod_update(bp); 14544 spin_unlock_bh(&bp->spq_lock); 14545 } 14546 14547 static int bnx2x_cnic_sp_queue(struct net_device *dev, 14548 struct kwqe_16 *kwqes[], u32 count) 14549 { 14550 struct bnx2x *bp = netdev_priv(dev); 14551 int i; 14552 14553 #ifdef BNX2X_STOP_ON_ERROR 14554 if (unlikely(bp->panic)) { 14555 BNX2X_ERR("Can't post to SP queue while panic\n"); 14556 return -EIO; 14557 } 14558 #endif 14559 14560 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && 14561 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { 14562 BNX2X_ERR("Handling parity error recovery. 
Try again later\n"); 14563 return -EAGAIN; 14564 } 14565 14566 spin_lock_bh(&bp->spq_lock); 14567 14568 for (i = 0; i < count; i++) { 14569 struct eth_spe *spe = (struct eth_spe *)kwqes[i]; 14570 14571 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) 14572 break; 14573 14574 *bp->cnic_kwq_prod = *spe; 14575 14576 bp->cnic_kwq_pending++; 14577 14578 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", 14579 spe->hdr.conn_and_cmd_data, spe->hdr.type, 14580 spe->data.update_data_addr.hi, 14581 spe->data.update_data_addr.lo, 14582 bp->cnic_kwq_pending); 14583 14584 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) 14585 bp->cnic_kwq_prod = bp->cnic_kwq; 14586 else 14587 bp->cnic_kwq_prod++; 14588 } 14589 14590 spin_unlock_bh(&bp->spq_lock); 14591 14592 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) 14593 bnx2x_cnic_sp_post(bp, 0); 14594 14595 return i; 14596 } 14597 14598 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) 14599 { 14600 struct cnic_ops *c_ops; 14601 int rc = 0; 14602 14603 mutex_lock(&bp->cnic_mutex); 14604 c_ops = rcu_dereference_protected(bp->cnic_ops, 14605 lockdep_is_held(&bp->cnic_mutex)); 14606 if (c_ops) 14607 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 14608 mutex_unlock(&bp->cnic_mutex); 14609 14610 return rc; 14611 } 14612 14613 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) 14614 { 14615 struct cnic_ops *c_ops; 14616 int rc = 0; 14617 14618 rcu_read_lock(); 14619 c_ops = rcu_dereference(bp->cnic_ops); 14620 if (c_ops) 14621 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 14622 rcu_read_unlock(); 14623 14624 return rc; 14625 } 14626 14627 /* 14628 * for commands that have no data 14629 */ 14630 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) 14631 { 14632 struct cnic_ctl_info ctl = {0}; 14633 14634 ctl.cmd = cmd; 14635 14636 return bnx2x_cnic_ctl_send(bp, &ctl); 14637 } 14638 14639 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) 14640 { 14641 struct cnic_ctl_info ctl = {0}; 14642 14643 /* first we tell CNIC and only then we count this as a completion */ 14644 ctl.cmd = CNIC_CTL_COMPLETION_CMD; 14645 ctl.data.comp.cid = cid; 14646 ctl.data.comp.error = err; 14647 14648 bnx2x_cnic_ctl_send_bh(bp, &ctl); 14649 bnx2x_cnic_sp_post(bp, 0); 14650 } 14651 14652 /* Called with netif_addr_lock_bh() taken. 14653 * Sets an rx_mode config for an iSCSI ETH client. 14654 * Doesn't block. 14655 * Completion should be checked outside. 14656 */ 14657 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) 14658 { 14659 unsigned long accept_flags = 0, ramrod_flags = 0; 14660 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 14661 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; 14662 14663 if (start) { 14664 /* Start accepting on iSCSI L2 ring. Accept all multicasts 14665 * because it's the only way for UIO Queue to accept 14666 * multicasts (in non-promiscuous mode only one Queue per 14667 * function will receive multicast packets (leading in our 14668 * case). 
14669 */ 14670 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); 14671 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); 14672 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); 14673 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 14674 14675 /* Clear STOP_PENDING bit if START is requested */ 14676 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); 14677 14678 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; 14679 } else 14680 /* Clear START_PENDING bit if STOP is requested */ 14681 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); 14682 14683 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 14684 set_bit(sched_state, &bp->sp_state); 14685 else { 14686 __set_bit(RAMROD_RX, &ramrod_flags); 14687 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, 14688 ramrod_flags); 14689 } 14690 } 14691 14692 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 14693 { 14694 struct bnx2x *bp = netdev_priv(dev); 14695 int rc = 0; 14696 14697 switch (ctl->cmd) { 14698 case DRV_CTL_CTXTBL_WR_CMD: { 14699 u32 index = ctl->data.io.offset; 14700 dma_addr_t addr = ctl->data.io.dma_addr; 14701 14702 bnx2x_ilt_wr(bp, index, addr); 14703 break; 14704 } 14705 14706 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { 14707 int count = ctl->data.credit.credit_count; 14708 14709 bnx2x_cnic_sp_post(bp, count); 14710 break; 14711 } 14712 14713 /* rtnl_lock is held. */ 14714 case DRV_CTL_START_L2_CMD: { 14715 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14716 unsigned long sp_bits = 0; 14717 14718 /* Configure the iSCSI classification object */ 14719 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, 14720 cp->iscsi_l2_client_id, 14721 cp->iscsi_l2_cid, BP_FUNC(bp), 14722 bnx2x_sp(bp, mac_rdata), 14723 bnx2x_sp_mapping(bp, mac_rdata), 14724 BNX2X_FILTER_MAC_PENDING, 14725 &bp->sp_state, BNX2X_OBJ_TYPE_RX, 14726 &bp->macs_pool); 14727 14728 /* Set iSCSI MAC address */ 14729 rc = bnx2x_set_iscsi_eth_mac_addr(bp); 14730 if (rc) 14731 break; 14732 14733 barrier(); 14734 14735 /* Start accepting on iSCSI L2 ring */ 14736 14737 netif_addr_lock_bh(dev); 14738 bnx2x_set_iscsi_eth_rx_mode(bp, true); 14739 netif_addr_unlock_bh(dev); 14740 14741 /* bits to wait on */ 14742 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 14743 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); 14744 14745 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 14746 BNX2X_ERR("rx_mode completion timed out!\n"); 14747 14748 break; 14749 } 14750 14751 /* rtnl_lock is held. 
*/ 14752 case DRV_CTL_STOP_L2_CMD: { 14753 unsigned long sp_bits = 0; 14754 14755 /* Stop accepting on iSCSI L2 ring */ 14756 netif_addr_lock_bh(dev); 14757 bnx2x_set_iscsi_eth_rx_mode(bp, false); 14758 netif_addr_unlock_bh(dev); 14759 14760 /* bits to wait on */ 14761 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 14762 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); 14763 14764 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 14765 BNX2X_ERR("rx_mode completion timed out!\n"); 14766 14767 barrier(); 14768 14769 /* Unset iSCSI L2 MAC */ 14770 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, 14771 BNX2X_ISCSI_ETH_MAC, true); 14772 break; 14773 } 14774 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { 14775 int count = ctl->data.credit.credit_count; 14776 14777 smp_mb__before_atomic(); 14778 atomic_add(count, &bp->cq_spq_left); 14779 smp_mb__after_atomic(); 14780 break; 14781 } 14782 case DRV_CTL_ULP_REGISTER_CMD: { 14783 int ulp_type = ctl->data.register_data.ulp_type; 14784 14785 if (CHIP_IS_E3(bp)) { 14786 int idx = BP_FW_MB_IDX(bp); 14787 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 14788 int path = BP_PATH(bp); 14789 int port = BP_PORT(bp); 14790 int i; 14791 u32 scratch_offset; 14792 u32 *host_addr; 14793 14794 /* first write capability to shmem2 */ 14795 if (ulp_type == CNIC_ULP_ISCSI) 14796 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 14797 else if (ulp_type == CNIC_ULP_FCOE) 14798 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 14799 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 14800 14801 if ((ulp_type != CNIC_ULP_FCOE) || 14802 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || 14803 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) 14804 break; 14805 14806 /* if reached here - should write fcoe capabilities */ 14807 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); 14808 if (!scratch_offset) 14809 break; 14810 scratch_offset += offsetof(struct glob_ncsi_oem_data, 14811 fcoe_features[path][port]); 14812 host_addr = (u32 *) &(ctl->data.register_data. 
14813 fcoe_features); 14814 for (i = 0; i < sizeof(struct fcoe_capabilities); 14815 i += 4) 14816 REG_WR(bp, scratch_offset + i, 14817 *(host_addr + i/4)); 14818 } 14819 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 14820 break; 14821 } 14822 14823 case DRV_CTL_ULP_UNREGISTER_CMD: { 14824 int ulp_type = ctl->data.ulp_type; 14825 14826 if (CHIP_IS_E3(bp)) { 14827 int idx = BP_FW_MB_IDX(bp); 14828 u32 cap; 14829 14830 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 14831 if (ulp_type == CNIC_ULP_ISCSI) 14832 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 14833 else if (ulp_type == CNIC_ULP_FCOE) 14834 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 14835 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 14836 } 14837 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 14838 break; 14839 } 14840 14841 default: 14842 BNX2X_ERR("unknown command %x\n", ctl->cmd); 14843 rc = -EINVAL; 14844 } 14845 14846 /* For storage-only interfaces, change driver state */ 14847 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) { 14848 switch (ctl->drv_state) { 14849 case DRV_NOP: 14850 break; 14851 case DRV_ACTIVE: 14852 bnx2x_set_os_driver_state(bp, 14853 OS_DRIVER_STATE_ACTIVE); 14854 break; 14855 case DRV_INACTIVE: 14856 bnx2x_set_os_driver_state(bp, 14857 OS_DRIVER_STATE_DISABLED); 14858 break; 14859 case DRV_UNLOADED: 14860 bnx2x_set_os_driver_state(bp, 14861 OS_DRIVER_STATE_NOT_LOADED); 14862 break; 14863 default: 14864 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state); 14865 } 14866 } 14867 14868 return rc; 14869 } 14870 14871 static int bnx2x_get_fc_npiv(struct net_device *dev, 14872 struct cnic_fc_npiv_tbl *cnic_tbl) 14873 { 14874 struct bnx2x *bp = netdev_priv(dev); 14875 struct bdn_fc_npiv_tbl *tbl = NULL; 14876 u32 offset, entries; 14877 int rc = -EINVAL; 14878 int i; 14879 14880 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0])) 14881 goto out; 14882 14883 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n"); 14884 14885 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); 14886 if (!tbl) { 14887 BNX2X_ERR("Failed to allocate fc_npiv table\n"); 14888 goto out; 14889 } 14890 14891 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]); 14892 if (!offset) { 14893 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); 14894 goto out; 14895 } 14896 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); 14897 14898 /* Read the table contents from nvram */ 14899 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) { 14900 BNX2X_ERR("Failed to read FC-NPIV table\n"); 14901 goto out; 14902 } 14903 14904 /* Since bnx2x_nvram_read() returns data in be32, we need to convert 14905 * the number of entries back to cpu endianness. 
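 * For example (illustrative): a table holding 4 NPIV entries reads back as 0x04000000 on a little-endian host until the swap below restores the value 4.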
         */
        entries = tbl->fc_npiv_cfg.num_of_npiv;
        entries = (__force u32)be32_to_cpu((__force __be32)entries);
        tbl->fc_npiv_cfg.num_of_npiv = entries;

        if (!tbl->fc_npiv_cfg.num_of_npiv) {
                DP(BNX2X_MSG_MCP,
                   "No FC-NPIV table [valid, simply not present]\n");
                goto out;
        } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
                BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
                          tbl->fc_npiv_cfg.num_of_npiv);
                goto out;
        } else {
                DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
                   tbl->fc_npiv_cfg.num_of_npiv);
        }

        /* Copy the data into cnic-provided struct */
        cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
        for (i = 0; i < cnic_tbl->count; i++) {
                memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
                memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
        }

        rc = 0;
out:
        kfree(tbl);
        return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        if (!CHIP_IS_E1x(bp))
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
        else
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

        cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
        cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;
        cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

        cp->num_irq = 2;
}

void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
                             bnx2x_cid_ilt_lines(bp);
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
        cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

        DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
           BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
           cp->iscsi_l2_cid);

        if (NO_ISCSI_OOO(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        int rc;

        DP(NETIF_MSG_IFUP, "Register_cnic called\n");

        if (ops == NULL) {
                BNX2X_ERR("NULL ops received\n");
                return -EINVAL;
        }

        if (!CNIC_SUPPORT(bp)) {
                BNX2X_ERR("Can't register CNIC when not supported\n");
                return -EOPNOTSUPP;
        }

        if (!CNIC_LOADED(bp)) {
                rc = bnx2x_load_cnic(bp);
                if (rc) {
                        BNX2X_ERR("CNIC-related load failed\n");
                        return rc;
                }
        }

        bp->cnic_enabled = true;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state |= CNIC_DRV_STATE_REGD;
        cp->iro_arr = bp->iro_arr;

        bnx2x_setup_cnic_irq_info(bp);

        rcu_assign_pointer(bp->cnic_ops, ops);

        /* Schedule driver to read CNIC driver versions */
        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

        return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        cp->drv_state = 0;
        RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        bp->cnic_enabled = false;
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}
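
/* Note on the unregister path above: bp->cnic_ops is published with
 * rcu_assign_pointer() in bnx2x_register_cnic() and cleared under
 * cnic_mutex here; the synchronize_rcu() that follows ensures that any
 * reader still dereferencing bp->cnic_ops under rcu_read_lock() has
 * finished before the kwq buffer is freed.
 */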

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        /* If both iSCSI and FCoE are disabled - return NULL in
         * order to indicate CNIC that it should not try to work
         * with this device.
         */
        if (NO_ISCSI(bp) && NO_FCOE(bp))
                return NULL;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
                             bnx2x_cid_ilt_lines(bp);
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;
        cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
        cp->iscsi_l2_client_id =
                bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

        if (NO_ISCSI_OOO(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

        if (NO_ISCSI(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

        if (NO_FCOE(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

        BNX2X_DEV_INFO(
                "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
                cp->ctx_blk_size,
                cp->ctx_tbl_offset,
                cp->ctx_tbl_len,
                cp->starting_cid);
        return cp;
}

static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        u32 offset = BAR_USTRORM_INTMEM;

        if (IS_VF(bp))
                return bnx2x_vf_ustorm_prods_offset(bp, fp);
        else if (!CHIP_IS_E1x(bp))
                offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
        else
                offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

        return offset;
}

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
        u32 pretend_reg;

        if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
                return -1;

        /* get my own pretend register */
        pretend_reg = bnx2x_get_pretend_reg(bp);
        REG_WR(bp, pretend_reg, pretend_func_val);
        REG_RD(bp, pretend_reg);
        return 0;
}
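
/* Illustrative call pattern (a sketch, not code from this file), assuming
 * the HW_VF_HANDLE() helper from the SR-IOV code: a PF briefly pretends to
 * be one of its VFs so that register accesses are attributed to that VF,
 * then restores its own function id:
 *
 *      bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *      ... access the VF's view of the hardware ...
 *      bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */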

static void bnx2x_ptp_task(struct work_struct *work)
{
        struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
        int port = BP_PORT(bp);
        u32 val_seq;
        u64 timestamp, ns;
        struct skb_shared_hwtstamps shhwtstamps;
        bool bail = true;
        int i;

        /* FW may take a while to complete timestamping; try a bit and if it's
         * still not complete, may indicate an error state - bail out then.
         */
        for (i = 0; i < 10; i++) {
                /* Read Tx timestamp registers */
                val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
                                 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
                if (val_seq & 0x10000) {
                        bail = false;
                        break;
                }
                msleep(1 << i);
        }

        if (!bail) {
                /* There is a valid timestamp value */
                timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
                                   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
                timestamp <<= 32;
                timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
                                    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
                /* Reset timestamp register to allow new timestamp */
                REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
                       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
                ns = timecounter_cyc2time(&bp->timecounter, timestamp);

                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = ns_to_ktime(ns);
                skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);

                DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
                   timestamp, ns);
        } else {
                DP(BNX2X_MSG_PTP,
                   "Tx timestamp is not recorded (register read=%u)\n",
                   val_seq);
                bp->eth_stats.ptp_skip_tx_ts++;
        }

        dev_kfree_skb_any(bp->ptp_tx_skb);
        bp->ptp_tx_skb = NULL;
}

void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
{
        int port = BP_PORT(bp);
        u64 timestamp, ns;

        timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
                           NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
        timestamp <<= 32;
        timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
                            NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);

        /* Reset timestamp register to allow new timestamp */
        REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
               NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);

        ns = timecounter_cyc2time(&bp->timecounter, timestamp);

        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);

        DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
           timestamp, ns);
}

/* Read the PHC */
static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
{
        struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
        int port = BP_PORT(bp);
        u32 wb_data[2];
        u64 phc_cycles;

        REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
                    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
        phc_cycles = wb_data[1];
        phc_cycles = (phc_cycles << 32) + wb_data[0];

        DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);

        return phc_cycles;
}

static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
        memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
        bp->cyclecounter.read = bnx2x_cyclecounter_read;
        bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
        bp->cyclecounter.shift = 0;
        bp->cyclecounter.mult = 1;
}
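
/* With mult == 1 and shift == 0 the generic timecounter performs an
 * identity conversion ((delta * mult) >> shift), i.e. the 64-bit
 * free-running value returned by bnx2x_cyclecounter_read() is already
 * interpreted as nanoseconds.
 */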

static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_set_timesync_params *set_timesync_params =
                &func_params.params.set_timesync;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
        __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

        /* Function parameters */
        set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
        set_timesync_params->offset_cmd = TS_OFFSET_KEEP;

        return bnx2x_func_state_change(bp, &func_params);
}

static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
{
        struct bnx2x_queue_state_params q_params;
        int rc, i;

        /* send queue update ramrod to enable PTP packets */
        memset(&q_params, 0, sizeof(q_params));
        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
        q_params.cmd = BNX2X_Q_CMD_UPDATE;
        __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
                  &q_params.params.update.update_flags);
        __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
                  &q_params.params.update.update_flags);

        /* send the ramrod on all the queues of the PF */
        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                /* Set the appropriate Queue object */
                q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

                /* Update the Queue state */
                rc = bnx2x_queue_state_change(bp, &q_params);
                if (rc) {
                        BNX2X_ERR("Failed to enable PTP packets\n");
                        return rc;
                }
        }

        return 0;
}

#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)

int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 param, rule;
        int rc;

        if (!bp->hwtstamp_ioctl_called)
                return 0;

        param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
                NIG_REG_P0_TLLH_PTP_PARAM_MASK;
        rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
                NIG_REG_P0_TLLH_PTP_RULE_MASK;
        switch (bp->tx_type) {
        case HWTSTAMP_TX_ON:
                bp->flags |= TX_TIMESTAMPING_EN;
                REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
                REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
                break;
        case HWTSTAMP_TX_ONESTEP_SYNC:
        case HWTSTAMP_TX_ONESTEP_P2P:
                BNX2X_ERR("One-step timestamping is not supported\n");
                return -ERANGE;
        }

        param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
                NIG_REG_P0_LLH_PTP_PARAM_MASK;
        rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
                NIG_REG_P0_LLH_PTP_RULE_MASK;
        switch (bp->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_NTP_ALL:
                bp->rx_filter = HWTSTAMP_FILTER_NONE;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                /* Initialize PTP detection for UDP/IPv4 events */
                REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
                REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
                REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
                REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                /* Initialize PTP detection L2 events */
                REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
                REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);

                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
                REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
                REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
                break;
        }

        /* Indicate to FW that this PF expects recorded PTP packets */
        rc = bnx2x_enable_ptp_packets(bp);
        if (rc)
                return rc;

        /* Enable sending PTP packets to host */
        REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
               NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);

        return 0;
}

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int rc;

        DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
           config.tx_type, config.rx_filter);

        if (config.flags) {
                BNX2X_ERR("config.flags is reserved for future use\n");
                return -EINVAL;
        }

        bp->hwtstamp_ioctl_called = true;
        bp->tx_type = config.tx_type;
        bp->rx_filter = config.rx_filter;

        rc = bnx2x_configure_ptp_filters(bp);
        if (rc)
                return rc;

        config.rx_filter = bp->rx_filter;

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}
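
/* For reference, a minimal userspace sketch (not part of the driver) of how
 * the ioctl above is typically exercised; names such as "eth0" and sock_fd
 * are placeholders:
 *
 *      struct hwtstamp_config cfg = { 0 };     (flags must stay 0)
 *      struct ifreq ifr = { 0 };
 *
 *      cfg.tx_type = HWTSTAMP_TX_ON;
 *      cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ifr.ifr_data = (void *)&cfg;
 *      ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * After also enabling SOF_TIMESTAMPING_TX_HARDWARE via SO_TIMESTAMPING, the
 * Tx timestamp delivered by skb_tstamp_tx() in bnx2x_ptp_task() is read
 * back from the socket error queue with recvmsg(..., MSG_ERRQUEUE).
 */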

/* Configures HW for PTP */
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
        int rc, port = BP_PORT(bp);
        u32 wb_data[2];

        /* Reset PTP event detection rules - will be configured in the IOCTL */
        REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
               NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
        REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
               NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
        REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
               NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
        REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
               NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);

        /* Disable PTP packets to host - will be configured in the IOCTL */
        REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
               NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);

        /* Enable the PTP feature */
        REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
               NIG_REG_P0_PTP_EN, 0x3F);

        /* Enable the free-running counter */
        wb_data[0] = 0;
        wb_data[1] = 0;
        REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);

        /* Reset drift register (offset register is not reset) */
        rc = bnx2x_send_reset_timesync_ramrod(bp);
        if (rc) {
                BNX2X_ERR("Failed to reset PHC drift register\n");
                return -EFAULT;
        }

        /* Reset possibly old timestamps */
        REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
               NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
        REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
               NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);

        return 0;
}

/* Called during load, to initialize PTP-related stuff */
void bnx2x_init_ptp(struct bnx2x *bp)
{
        int rc;

        /* Configure PTP in HW */
        rc = bnx2x_configure_ptp(bp);
        if (rc) {
                BNX2X_ERR("Stopping PTP initialization\n");
                return;
        }

        /* Init work item for Tx timestamping */
        INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);

        /* Init cyclecounter and timecounter. This is done only in the first
         * load. If done in every load, PTP application will fail when doing
         * unload / load (e.g. MTU change) while it is running.
         */
        if (!bp->timecounter_init_done) {
                bnx2x_init_cyclecounter(bp);
                timecounter_init(&bp->timecounter, &bp->cyclecounter,
                                 ktime_to_ns(ktime_get_real()));
                bp->timecounter_init_done = true;
        }

        DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
}