/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

struct workqueue_struct *bnx2x_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr;
	u32 umac_val;
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	[BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	[BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
/****************************************************************************
* General service functions
****************************************************************************/

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
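
/* Indirect GRC access: a sliding window in PCI config space. The target
 * GRC address is latched through PCICFG_GRC_ADDRESS and the data moved
 * through PCICFG_GRC_DATA; the window is then parked back at
 * PCICFG_VENDOR_ID_OFFSET, presumably so a stray config cycle cannot
 * reach an arbitrary GRC register. A single indirect read (below) is
 * thus three config-space accesses:
 *
 *	pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, addr);
 *	pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
 *	pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS,
 *			       PCICFG_VENDOR_ID_OFFSET);
 */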
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}
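
/* A DMAE opcode is assembled from independent bit-fields: source and
 * destination type (PCI or GRC), per-port and per-VN routing, reset
 * policy, error policy, host endianness swap and, optionally, the
 * completion destination added by bnx2x_dmae_opcode_add_comp() above.
 * For example, a PCI->GRC copy that completes to host memory is built
 * with:
 *
 *	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 */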
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a deadlock,
	 * since this code is called from both syscall context and
	 * from the ndo_set_rx_mode() flow, which may run in BH context.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}
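
/* Completion protocol used above: the DMAE engine writes DMAE_COMP_VAL
 * to the wb_comp word in host memory once the copy is done, or raises
 * DMAE_PCI_ERR_FLAG in the same word on a PCI error. The caller zeroes
 * wb_comp, posts the command and polls at 50 usec intervals - about
 * 4000 * 50 usec = 200 msec on real silicon (400000 iterations on slow
 * emulation/FPGA revisions) - before giving up with DMAE_TIMEOUT.
 */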
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
		bnx2x_panic();
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
		bnx2x_panic();
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
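
/* bnx2x_write_dmae_phys_len() above splits a large write into commands
 * of at most DMAE_LEN32_WR_MAX(bp) dwords: len counts dwords while the
 * running offset advances in bytes (hence the "* 4"). A 0x500-dword
 * buffer against a 0x400-dword limit, for instance, goes out as one
 * maximal command followed by a 0x100-dword tail.
 */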
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x800;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}
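
/* Two interrupt-controller generations are supported: the HC block on
 * the older E1x chips and the IGU on E2 and newer. bnx2x_int_disable()
 * below (like bnx2x_int_enable() further down) just dispatches on
 * bp->common.int_block; both disable paths read the configuration back
 * after writing it to verify that the disable actually took effect.
 */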
static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif
	if (disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR

	/* event queue */
	BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
	for (i = 0; i < NUM_EQ_DESC; i++) {
		u32 *data = (u32 *)&bp->eq_ring[i].message.data;

		BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
			  i, bp->eq_ring[i].message.opcode,
			  bp->eq_ring[i].message.error);
		BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
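
/* The "freed" counts polled by bnx2x_pbf_pN_buf_flushed() above and by
 * bnx2x_pbf_pN_cmd_flushed() below are free-running hardware counters,
 * so forward progress is measured as a wrap-safe signed delta -
 * (u32)SUB_S32(current, start) - rather than by comparing the raw
 * counter against its starting snapshot.
 */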
static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

/* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}
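
/* On real silicon the FLR poll budget works out to FLR_POLL_CNT =
 * 10000 / 50 = 200 iterations of FLR_WAIT_INTERVAL (50 usec), i.e.
 * roughly 10 msec of polling. bnx2x_flr_clnup_poll_count() above scales
 * this for the slower platforms: x2000 on emulation (~20 sec) and x120
 * on FPGA (~1.2 sec).
 */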
void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}
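
/* The final-cleanup request sent by bnx2x_send_final_clnup() above is a
 * single dword written to the XSDM operation generator: OP_GEN_PARAM()
 * selects the final-cleanup completion index, OP_GEN_TYPE() the
 * aggregated-interrupt completion type, OP_GEN_AGG_VECT() the
 * per-function aggregation vector, and the VALID bit arms the command.
 * Completion is detected by polling the per-function CSTORM completion
 * word back to 1, which is then rezeroed so the next FLR can reuse it.
 */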
static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
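
/* bnx2x_igu_int_enable() below mirrors the HC routine above: MSI-X
 * clears the INT_LINE enable (single-vector MSI-X additionally sets
 * SINGLE_ISR), plain MSI runs as a single ISR with the line disabled,
 * and legacy INTx keeps the interrupt line enabled; the attention-bit
 * enable is set in every mode. The leading/trailing edge registers are
 * programmed last (on non-E1 chips in the HC case), and NIG/GPIO3
 * attentions are only unmasked for the PMF.
 */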
"MSI" : "INTx"))); 1589 1590 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1591 1592 if (val & IGU_PF_CONF_INT_LINE_EN) 1593 pci_intx(bp->pdev, true); 1594 1595 barrier(); 1596 1597 /* init leading/trailing edge */ 1598 if (IS_MF(bp)) { 1599 val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1600 if (bp->port.pmf) 1601 /* enable nig and gpio3 attention */ 1602 val |= 0x1100; 1603 } else 1604 val = 0xffff; 1605 1606 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 1607 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 1608 1609 /* Make sure that interrupts are indeed enabled from here on */ 1610 mmiowb(); 1611 } 1612 1613 void bnx2x_int_enable(struct bnx2x *bp) 1614 { 1615 if (bp->common.int_block == INT_BLOCK_HC) 1616 bnx2x_hc_int_enable(bp); 1617 else 1618 bnx2x_igu_int_enable(bp); 1619 } 1620 1621 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1622 { 1623 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1624 int i, offset; 1625 1626 if (disable_hw) 1627 /* prevent the HW from sending interrupts */ 1628 bnx2x_int_disable(bp); 1629 1630 /* make sure all ISRs are done */ 1631 if (msix) { 1632 synchronize_irq(bp->msix_table[0].vector); 1633 offset = 1; 1634 if (CNIC_SUPPORT(bp)) 1635 offset++; 1636 for_each_eth_queue(bp, i) 1637 synchronize_irq(bp->msix_table[offset++].vector); 1638 } else 1639 synchronize_irq(bp->pdev->irq); 1640 1641 /* make sure sp_task is not running */ 1642 cancel_delayed_work(&bp->sp_task); 1643 cancel_delayed_work(&bp->period_task); 1644 flush_workqueue(bnx2x_wq); 1645 } 1646 1647 /* fast path */ 1648 1649 /* 1650 * General service functions 1651 */ 1652 1653 /* Return true if succeeded to acquire the lock */ 1654 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) 1655 { 1656 u32 lock_status; 1657 u32 resource_bit = (1 << resource); 1658 int func = BP_FUNC(bp); 1659 u32 hw_lock_control_reg; 1660 1661 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1662 "Trying to take a lock on resource %d\n", resource); 1663 1664 /* Validating that the resource is within range */ 1665 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1666 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1667 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1668 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1669 return false; 1670 } 1671 1672 if (func <= 5) 1673 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 1674 else 1675 hw_lock_control_reg = 1676 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 1677 1678 /* Try to acquire the lock */ 1679 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 1680 lock_status = REG_RD(bp, hw_lock_control_reg); 1681 if (lock_status & resource_bit) 1682 return true; 1683 1684 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1685 "Failed to get a lock on resource %d\n", resource); 1686 return false; 1687 } 1688 1689 /** 1690 * bnx2x_get_leader_lock_resource - get the recovery leader resource id 1691 * 1692 * @bp: driver handle 1693 * 1694 * Returns the recovery leader resource id according to the engine this function 1695 * belongs to. Currently only only 2 engines is supported. 1696 */ 1697 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) 1698 { 1699 if (BP_PATH(bp)) 1700 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 1701 else 1702 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; 1703 } 1704 1705 /** 1706 * bnx2x_trylock_leader_lock- try to acquire a leader lock. 1707 * 1708 * @bp: driver handle 1709 * 1710 * Tries to acquire a leader lock for current engine. 
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Set the interrupt occurred bit for the sp-task to recognize it
	 * must ack the interrupt and transition according to the IGU
	 * state machine.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this bit
	 * is set, otherwise we will get out of sync and miss all
	 * further interrupts. Hence, the barrier.
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}
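
/* Ordering contract of bnx2x_schedule_sp_task() above: the
 * interrupt_occurred flag must be globally visible before the work item
 * can possibly run on another CPU, hence the smp_wmb() between
 * atomic_set() and queue_delayed_work(). The zero delay makes the work
 * runnable immediately, and queue_delayed_work() returns false when the
 * work was already pending, which harmlessly coalesces back-to-back
 * slowpath interrupts.
 */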
1803 */ 1804 #ifdef BNX2X_STOP_ON_ERROR 1805 bnx2x_panic(); 1806 #else 1807 return; 1808 #endif 1809 /* SRIOV: reschedule any 'in_progress' operations */ 1810 bnx2x_iov_sp_event(bp, cid, true); 1811 1812 smp_mb__before_atomic_inc(); 1813 atomic_inc(&bp->cq_spq_left); 1814 /* push the change in bp->spq_left and towards the memory */ 1815 smp_mb__after_atomic_inc(); 1816 1817 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1818 1819 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 1820 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { 1821 /* if Q update ramrod is completed for last Q in AFEX vif set 1822 * flow, then ACK MCP at the end 1823 * 1824 * mark pending ACK to MCP bit. 1825 * prevent case that both bits are cleared. 1826 * At the end of load/unload driver checks that 1827 * sp_state is cleared, and this order prevents 1828 * races 1829 */ 1830 smp_mb__before_clear_bit(); 1831 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); 1832 wmb(); 1833 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 1834 smp_mb__after_clear_bit(); 1835 1836 /* schedule the sp task as mcp ack is required */ 1837 bnx2x_schedule_sp_task(bp); 1838 } 1839 1840 return; 1841 } 1842 1843 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) 1844 { 1845 struct bnx2x *bp = netdev_priv(dev_instance); 1846 u16 status = bnx2x_ack_int(bp); 1847 u16 mask; 1848 int i; 1849 u8 cos; 1850 1851 /* Return here if interrupt is shared and it's not for us */ 1852 if (unlikely(status == 0)) { 1853 DP(NETIF_MSG_INTR, "not our interrupt!\n"); 1854 return IRQ_NONE; 1855 } 1856 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); 1857 1858 #ifdef BNX2X_STOP_ON_ERROR 1859 if (unlikely(bp->panic)) 1860 return IRQ_HANDLED; 1861 #endif 1862 1863 for_each_eth_queue(bp, i) { 1864 struct bnx2x_fastpath *fp = &bp->fp[i]; 1865 1866 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); 1867 if (status & mask) { 1868 /* Handle Rx or Tx according to SB id */ 1869 prefetch(fp->rx_cons_sb); 1870 for_each_cos_in_tx_queue(fp, cos) 1871 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); 1872 prefetch(&fp->sb_running_index[SM_RX_ID]); 1873 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1874 status &= ~mask; 1875 } 1876 } 1877 1878 if (CNIC_SUPPORT(bp)) { 1879 mask = 0x2; 1880 if (status & (mask | 0x1)) { 1881 struct cnic_ops *c_ops = NULL; 1882 1883 rcu_read_lock(); 1884 c_ops = rcu_dereference(bp->cnic_ops); 1885 if (c_ops && (bp->cnic_eth_dev.drv_state & 1886 CNIC_DRV_STATE_HANDLES_IRQ)) 1887 c_ops->cnic_handler(bp->cnic_data, NULL); 1888 rcu_read_unlock(); 1889 1890 status &= ~mask; 1891 } 1892 } 1893 1894 if (unlikely(status & 0x1)) { 1895 1896 /* schedule sp task to perform default status block work, ack 1897 * attentions and enable interrupts. 1898 */ 1899 bnx2x_schedule_sp_task(bp); 1900 1901 status &= ~0x1; 1902 if (!status) 1903 return IRQ_HANDLED; 1904 } 1905 1906 if (unlikely(status)) 1907 DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", 1908 status); 1909 1910 return IRQ_HANDLED; 1911 } 1912 1913 /* Link */ 1914 1915 /* 1916 * General service functions 1917 */ 1918 1919 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) 1920 { 1921 u32 lock_status; 1922 u32 resource_bit = (1 << resource); 1923 int func = BP_FUNC(bp); 1924 u32 hw_lock_control_reg; 1925 int cnt; 1926 1927 /* Validating that the resource is within range */ 1928 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1929 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1930 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1931 return -EINVAL; 1932 } 1933 1934 if (func <= 5) { 1935 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 1936 } else { 1937 hw_lock_control_reg = 1938 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 1939 } 1940 1941 /* Validating that the resource is not already taken */ 1942 lock_status = REG_RD(bp, hw_lock_control_reg); 1943 if (lock_status & resource_bit) { 1944 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n", 1945 lock_status, resource_bit); 1946 return -EEXIST; 1947 } 1948 1949 /* Try for 5 second every 5ms */ 1950 for (cnt = 0; cnt < 1000; cnt++) { 1951 /* Try to acquire the lock */ 1952 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 1953 lock_status = REG_RD(bp, hw_lock_control_reg); 1954 if (lock_status & resource_bit) 1955 return 0; 1956 1957 usleep_range(5000, 10000); 1958 } 1959 BNX2X_ERR("Timeout\n"); 1960 return -EAGAIN; 1961 } 1962 1963 int bnx2x_release_leader_lock(struct bnx2x *bp) 1964 { 1965 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 1966 } 1967 1968 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) 1969 { 1970 u32 lock_status; 1971 u32 resource_bit = (1 << resource); 1972 int func = BP_FUNC(bp); 1973 u32 hw_lock_control_reg; 1974 1975 /* Validating that the resource is within range */ 1976 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1977 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1978 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1979 return -EINVAL; 1980 } 1981 1982 if (func <= 5) { 1983 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 1984 } else { 1985 hw_lock_control_reg = 1986 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 1987 } 1988 1989 /* Validating that the resource is currently taken */ 1990 lock_status = REG_RD(bp, hw_lock_control_reg); 1991 if (!(lock_status & resource_bit)) { 1992 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n", 1993 lock_status, resource_bit); 1994 return -EFAULT; 1995 } 1996 1997 REG_WR(bp, hw_lock_control_reg, resource_bit); 1998 return 0; 1999 } 2000 2001 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 2002 { 2003 /* The GPIO should be swapped if swap register is set and active */ 2004 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2005 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2006 int gpio_shift = gpio_num + 2007 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2008 u32 gpio_mask = (1 << gpio_shift); 2009 u32 gpio_reg; 2010 int value; 2011 2012 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2013 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2014 return -EINVAL; 2015 } 2016 2017 /* read GPIO value */ 2018 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2019 2020 /* get the requested pin value */ 2021 if ((gpio_reg & gpio_mask) == gpio_mask) 2022 value = 1; 2023 else 2024 value = 0; 2025 2026 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); 2027 2028 return value; 2029 } 2030 2031 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2032 { 2033 /* The GPIO should be swapped if swap register is set and active */ 2034 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2035 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2036 int gpio_shift = gpio_num + 2037 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2038 u32 gpio_mask = (1 << gpio_shift); 2039 u32 gpio_reg; 2040 2041 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2042 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2043 return -EINVAL; 2044 } 2045 2046 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2047 /* read GPIO and mask except the float bits */ 2048 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2049 2050 switch (mode) { 2051 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2052 DP(NETIF_MSG_LINK, 2053 "Set GPIO %d (shift %d) -> output low\n", 2054 gpio_num, gpio_shift); 2055 /* clear FLOAT and set CLR */ 2056 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2057 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2058 break; 2059 2060 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2061 DP(NETIF_MSG_LINK, 2062 "Set GPIO %d (shift %d) -> output high\n", 2063 gpio_num, gpio_shift); 2064 /* clear FLOAT and set SET */ 2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2067 break; 2068 2069 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2070 DP(NETIF_MSG_LINK, 2071 "Set GPIO %d (shift %d) -> input\n", 2072 gpio_num, gpio_shift); 2073 /* set FLOAT */ 2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2075 break; 2076 2077 default: 2078 break; 2079 } 2080 2081 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2082 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2083 2084 return 0; 2085 } 2086 2087 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) 2088 { 2089 u32 gpio_reg = 0; 2090 int rc = 0; 2091 2092 /* Any port swapping should be handled by caller. 
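	 *
	 * "pins" is already a bit mask in MISC_REG_GPIO layout, so a
	 * hypothetical caller driving pins 0 and 2 low in a single register
	 * update would look like:
	 *
	 *	bnx2x_set_mult_gpio(bp, BIT(0) | BIT(2),
	 *			    MISC_REGISTERS_GPIO_OUTPUT_LOW);
	 *
	 * (illustrative only - which pins do what is board specific)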
*/ 2093 2094 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2095 /* read GPIO and mask except the float bits */ 2096 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2097 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2098 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2099 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2100 2101 switch (mode) { 2102 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2103 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); 2104 /* set CLR */ 2105 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2106 break; 2107 2108 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2109 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); 2110 /* set SET */ 2111 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2112 break; 2113 2114 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2115 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); 2116 /* set FLOAT */ 2117 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2118 break; 2119 2120 default: 2121 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); 2122 rc = -EINVAL; 2123 break; 2124 } 2125 2126 if (rc == 0) 2127 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2128 2129 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2130 2131 return rc; 2132 } 2133 2134 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2135 { 2136 /* The GPIO should be swapped if swap register is set and active */ 2137 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2138 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2139 int gpio_shift = gpio_num + 2140 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2141 u32 gpio_mask = (1 << gpio_shift); 2142 u32 gpio_reg; 2143 2144 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2145 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2146 return -EINVAL; 2147 } 2148 2149 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2150 /* read GPIO int */ 2151 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); 2152 2153 switch (mode) { 2154 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2155 DP(NETIF_MSG_LINK, 2156 "Clear GPIO INT %d (shift %d) -> output low\n", 2157 gpio_num, gpio_shift); 2158 /* clear SET and set CLR */ 2159 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2160 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2161 break; 2162 2163 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2164 DP(NETIF_MSG_LINK, 2165 "Set GPIO INT %d (shift %d) -> output high\n", 2166 gpio_num, gpio_shift); 2167 /* clear CLR and set SET */ 2168 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2169 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2170 break; 2171 2172 default: 2173 break; 2174 } 2175 2176 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); 2177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2178 2179 return 0; 2180 } 2181 2182 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) 2183 { 2184 u32 spio_reg; 2185 2186 /* Only 2 SPIOs are configurable */ 2187 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 2188 BNX2X_ERR("Invalid SPIO 0x%x\n", spio); 2189 return -EINVAL; 2190 } 2191 2192 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2193 /* read SPIO and mask except the float bits */ 2194 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 2195 2196 switch (mode) { 2197 case MISC_SPIO_OUTPUT_LOW: 2198 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); 2199 /* clear FLOAT and set CLR */ 2200 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2201 spio_reg |= (spio << MISC_SPIO_CLR_POS); 2202 break; 2203 2204 case MISC_SPIO_OUTPUT_HIGH: 2205 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output 
high\n", spio); 2206 /* clear FLOAT and set SET */ 2207 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2208 spio_reg |= (spio << MISC_SPIO_SET_POS); 2209 break; 2210 2211 case MISC_SPIO_INPUT_HI_Z: 2212 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); 2213 /* set FLOAT */ 2214 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 2215 break; 2216 2217 default: 2218 break; 2219 } 2220 2221 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2222 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2223 2224 return 0; 2225 } 2226 2227 void bnx2x_calc_fc_adv(struct bnx2x *bp) 2228 { 2229 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2230 switch (bp->link_vars.ieee_fc & 2231 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2232 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 2233 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2234 ADVERTISED_Pause); 2235 break; 2236 2237 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2238 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2239 ADVERTISED_Pause); 2240 break; 2241 2242 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2243 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2244 break; 2245 2246 default: 2247 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2248 ADVERTISED_Pause); 2249 break; 2250 } 2251 } 2252 2253 static void bnx2x_set_requested_fc(struct bnx2x *bp) 2254 { 2255 /* Initialize link parameters structure variables 2256 * It is recommended to turn off RX FC for jumbo frames 2257 * for better performance 2258 */ 2259 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2260 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2261 else 2262 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2263 } 2264 2265 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2266 { 2267 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); 2268 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2269 2270 if (!BP_NOMCP(bp)) { 2271 bnx2x_set_requested_fc(bp); 2272 bnx2x_acquire_phy_lock(bp); 2273 2274 if (load_mode == LOAD_DIAG) { 2275 struct link_params *lp = &bp->link_params; 2276 lp->loopback_mode = LOOPBACK_XGXS; 2277 /* do PHY loopback at 10G speed, if possible */ 2278 if (lp->req_line_speed[cfx_idx] < SPEED_10000) { 2279 if (lp->speed_cap_mask[cfx_idx] & 2280 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2281 lp->req_line_speed[cfx_idx] = 2282 SPEED_10000; 2283 else 2284 lp->req_line_speed[cfx_idx] = 2285 SPEED_1000; 2286 } 2287 } 2288 2289 if (load_mode == LOAD_LOOPBACK_EXT) { 2290 struct link_params *lp = &bp->link_params; 2291 lp->loopback_mode = LOOPBACK_EXT; 2292 } 2293 2294 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2295 2296 bnx2x_release_phy_lock(bp); 2297 2298 bnx2x_calc_fc_adv(bp); 2299 2300 if (bp->link_vars.link_up) { 2301 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2302 bnx2x_link_report(bp); 2303 } 2304 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2305 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2306 return rc; 2307 } 2308 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2309 return -EINVAL; 2310 } 2311 2312 void bnx2x_link_set(struct bnx2x *bp) 2313 { 2314 if (!BP_NOMCP(bp)) { 2315 bnx2x_acquire_phy_lock(bp); 2316 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2317 bnx2x_release_phy_lock(bp); 2318 2319 bnx2x_calc_fc_adv(bp); 2320 } else 2321 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2322 } 2323 2324 static void bnx2x__link_reset(struct bnx2x *bp) 2325 { 2326 if (!BP_NOMCP(bp)) { 2327 bnx2x_acquire_phy_lock(bp); 2328 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); 2329 
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

void bnx2x_force_link_reset(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
	bnx2x_release_phy_lock(bp);
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_min(struct bnx2x *bp,
			      struct cmng_init_input *input)
{
	int all_zero = 1;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
		else if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		input->vnic_min_rate[vn] = vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes fairness will be disabled\n");
	} else
		input->flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
			      struct cmng_init_input *input)
{
	u16 vn_max_rate;
	u32 vn_cfg = bp->mf_config[vn];

	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
		vn_max_rate = 0;
	else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		if (IS_MF_SI(bp)) {
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else /* SD modes */
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

	input->vnic_max_rate[vn] = vn_max_rate;
}

static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ?
2 : 1); 2440 2441 if (BP_NOMCP(bp)) 2442 return; /* what should be the default value in this case */ 2443 2444 /* For 2 port configuration the absolute function number formula 2445 * is: 2446 * abs_func = 2 * vn + BP_PORT + BP_PATH 2447 * 2448 * and there are 4 functions per port 2449 * 2450 * For 4 port configuration it is 2451 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH 2452 * 2453 * and there are 2 functions per port 2454 */ 2455 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2456 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2457 2458 if (func >= E1H_FUNC_MAX) 2459 break; 2460 2461 bp->mf_config[vn] = 2462 MF_CFG_RD(bp, func_mf_config[func].config); 2463 } 2464 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 2465 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 2466 bp->flags |= MF_FUNC_DIS; 2467 } else { 2468 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); 2469 bp->flags &= ~MF_FUNC_DIS; 2470 } 2471 } 2472 2473 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) 2474 { 2475 struct cmng_init_input input; 2476 memset(&input, 0, sizeof(struct cmng_init_input)); 2477 2478 input.port_rate = bp->link_vars.line_speed; 2479 2480 if (cmng_type == CMNG_FNS_MINMAX) { 2481 int vn; 2482 2483 /* read mf conf from shmem */ 2484 if (read_cfg) 2485 bnx2x_read_mf_cfg(bp); 2486 2487 /* vn_weight_sum and enable fairness if not 0 */ 2488 bnx2x_calc_vn_min(bp, &input); 2489 2490 /* calculate and set min-max rate for each vn */ 2491 if (bp->port.pmf) 2492 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2493 bnx2x_calc_vn_max(bp, vn, &input); 2494 2495 /* always enable rate shaping and fairness */ 2496 input.flags.cmng_enables |= 2497 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 2498 2499 bnx2x_init_cmng(&input, &bp->cmng); 2500 return; 2501 } 2502 2503 /* rate shaping and fairness are disabled */ 2504 DP(NETIF_MSG_IFUP, 2505 "rate shaping and fairness are disabled\n"); 2506 } 2507 2508 static void storm_memset_cmng(struct bnx2x *bp, 2509 struct cmng_init *cmng, 2510 u8 port) 2511 { 2512 int vn; 2513 size_t size = sizeof(struct cmng_struct_per_port); 2514 2515 u32 addr = BAR_XSTRORM_INTMEM + 2516 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 2517 2518 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); 2519 2520 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2521 int func = func_by_vn(bp, vn); 2522 2523 addr = BAR_XSTRORM_INTMEM + 2524 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func); 2525 size = sizeof(struct rate_shaping_vars_per_vn); 2526 __storm_memset_struct(bp, addr, size, 2527 (u32 *)&cmng->vnic.vnic_max_rate[vn]); 2528 2529 addr = BAR_XSTRORM_INTMEM + 2530 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func); 2531 size = sizeof(struct fairness_vars_per_vn); 2532 __storm_memset_struct(bp, addr, size, 2533 (u32 *)&cmng->vnic.vnic_min_rate[vn]); 2534 } 2535 } 2536 2537 /* This function is called upon link interrupt */ 2538 static void bnx2x_link_attn(struct bnx2x *bp) 2539 { 2540 /* Make sure that we are synced with the current statistics */ 2541 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2542 2543 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2544 2545 if (bp->link_vars.link_up) { 2546 2547 /* dropless flow control */ 2548 if (!CHIP_IS_E1(bp) && bp->dropless_fc) { 2549 int port = BP_PORT(bp); 2550 u32 pause_enabled = 0; 2551 2552 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 2553 pause_enabled = 1; 2554 2555 REG_WR(bp, BAR_USTRORM_INTMEM + 2556 USTORM_ETH_PAUSE_ENABLED_OFFSET(port), 2557 pause_enabled); 2558 } 2559 2560 if (bp->link_vars.mac_type != 
MAC_TYPE_EMAC) { 2561 struct host_port_stats *pstats; 2562 2563 pstats = bnx2x_sp(bp, port_stats); 2564 /* reset old mac stats */ 2565 memset(&(pstats->mac_stx[0]), 0, 2566 sizeof(struct mac_stx)); 2567 } 2568 if (bp->state == BNX2X_STATE_OPEN) 2569 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2570 } 2571 2572 if (bp->link_vars.link_up && bp->link_vars.line_speed) { 2573 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2574 2575 if (cmng_fns != CMNG_FNS_NONE) { 2576 bnx2x_cmng_fns_init(bp, false, cmng_fns); 2577 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 2578 } else 2579 /* rate shaping and fairness are disabled */ 2580 DP(NETIF_MSG_IFUP, 2581 "single function mode without fairness\n"); 2582 } 2583 2584 __bnx2x_link_report(bp); 2585 2586 if (IS_MF(bp)) 2587 bnx2x_link_sync_notify(bp); 2588 } 2589 2590 void bnx2x__link_status_update(struct bnx2x *bp) 2591 { 2592 if (bp->state != BNX2X_STATE_OPEN) 2593 return; 2594 2595 /* read updated dcb configuration */ 2596 if (IS_PF(bp)) { 2597 bnx2x_dcbx_pmf_update(bp); 2598 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2599 if (bp->link_vars.link_up) 2600 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2601 else 2602 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2603 /* indicate link status */ 2604 bnx2x_link_report(bp); 2605 2606 } else { /* VF */ 2607 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | 2608 SUPPORTED_10baseT_Full | 2609 SUPPORTED_100baseT_Half | 2610 SUPPORTED_100baseT_Full | 2611 SUPPORTED_1000baseT_Full | 2612 SUPPORTED_2500baseX_Full | 2613 SUPPORTED_10000baseT_Full | 2614 SUPPORTED_TP | 2615 SUPPORTED_FIBRE | 2616 SUPPORTED_Autoneg | 2617 SUPPORTED_Pause | 2618 SUPPORTED_Asym_Pause); 2619 bp->port.advertising[0] = bp->port.supported[0]; 2620 2621 bp->link_params.bp = bp; 2622 bp->link_params.port = BP_PORT(bp); 2623 bp->link_params.req_duplex[0] = DUPLEX_FULL; 2624 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; 2625 bp->link_params.req_line_speed[0] = SPEED_10000; 2626 bp->link_params.speed_cap_mask[0] = 0x7f0000; 2627 bp->link_params.switch_cfg = SWITCH_CFG_10G; 2628 bp->link_vars.mac_type = MAC_TYPE_BMAC; 2629 bp->link_vars.line_speed = SPEED_10000; 2630 bp->link_vars.link_status = 2631 (LINK_STATUS_LINK_UP | 2632 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 2633 bp->link_vars.link_up = 1; 2634 bp->link_vars.duplex = DUPLEX_FULL; 2635 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; 2636 __bnx2x_link_report(bp); 2637 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2638 } 2639 } 2640 2641 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, 2642 u16 vlan_val, u8 allowed_prio) 2643 { 2644 struct bnx2x_func_state_params func_params = {NULL}; 2645 struct bnx2x_func_afex_update_params *f_update_params = 2646 &func_params.params.afex_update; 2647 2648 func_params.f_obj = &bp->func_obj; 2649 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE; 2650 2651 /* no need to wait for RAMROD completion, so don't 2652 * set RAMROD_COMP_WAIT flag 2653 */ 2654 2655 f_update_params->vif_id = vifid; 2656 f_update_params->afex_default_vlan = vlan_val; 2657 f_update_params->allowed_priorities = allowed_prio; 2658 2659 /* if ramrod can not be sent, response to MCP immediately */ 2660 if (bnx2x_func_state_change(bp, &func_params) < 0) 2661 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 2662 2663 return 0; 2664 } 2665 2666 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, 2667 u16 vif_index, u8 func_bit_map) 2668 { 2669 struct bnx2x_func_state_params func_params = {NULL}; 2670 struct bnx2x_func_afex_viflists_params 
*update_params = 2671 &func_params.params.afex_viflists; 2672 int rc; 2673 u32 drv_msg_code; 2674 2675 /* validate only LIST_SET and LIST_GET are received from switch */ 2676 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET)) 2677 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n", 2678 cmd_type); 2679 2680 func_params.f_obj = &bp->func_obj; 2681 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS; 2682 2683 /* set parameters according to cmd_type */ 2684 update_params->afex_vif_list_command = cmd_type; 2685 update_params->vif_list_index = vif_index; 2686 update_params->func_bit_map = 2687 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map; 2688 update_params->func_to_clear = 0; 2689 drv_msg_code = 2690 (cmd_type == VIF_LIST_RULE_GET) ? 2691 DRV_MSG_CODE_AFEX_LISTGET_ACK : 2692 DRV_MSG_CODE_AFEX_LISTSET_ACK; 2693 2694 /* if ramrod can not be sent, respond to MCP immediately for 2695 * SET and GET requests (other are not triggered from MCP) 2696 */ 2697 rc = bnx2x_func_state_change(bp, &func_params); 2698 if (rc < 0) 2699 bnx2x_fw_command(bp, drv_msg_code, 0); 2700 2701 return 0; 2702 } 2703 2704 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) 2705 { 2706 struct afex_stats afex_stats; 2707 u32 func = BP_ABS_FUNC(bp); 2708 u32 mf_config; 2709 u16 vlan_val; 2710 u32 vlan_prio; 2711 u16 vif_id; 2712 u8 allowed_prio; 2713 u8 vlan_mode; 2714 u32 addr_to_write, vifid, addrs, stats_type, i; 2715 2716 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { 2717 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2718 DP(BNX2X_MSG_MCP, 2719 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); 2720 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); 2721 } 2722 2723 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { 2724 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2725 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); 2726 DP(BNX2X_MSG_MCP, 2727 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", 2728 vifid, addrs); 2729 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, 2730 addrs); 2731 } 2732 2733 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { 2734 addr_to_write = SHMEM2_RD(bp, 2735 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); 2736 stats_type = SHMEM2_RD(bp, 2737 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2738 2739 DP(BNX2X_MSG_MCP, 2740 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", 2741 addr_to_write); 2742 2743 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); 2744 2745 /* write response to scratchpad, for MCP */ 2746 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) 2747 REG_WR(bp, addr_to_write + i*sizeof(u32), 2748 *(((u32 *)(&afex_stats))+i)); 2749 2750 /* send ack message to MCP */ 2751 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); 2752 } 2753 2754 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { 2755 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); 2756 bp->mf_config[BP_VN(bp)] = mf_config; 2757 DP(BNX2X_MSG_MCP, 2758 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", 2759 mf_config); 2760 2761 /* if VIF_SET is "enabled" */ 2762 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { 2763 /* set rate limit directly to internal RAM */ 2764 struct cmng_init_input cmng_input; 2765 struct rate_shaping_vars_per_vn m_rs_vn; 2766 size_t size = sizeof(struct rate_shaping_vars_per_vn); 2767 u32 addr = BAR_XSTRORM_INTMEM + 2768 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); 2769 2770 bp->mf_config[BP_VN(bp)] = mf_config; 2771 2772 bnx2x_calc_vn_max(bp, BP_VN(bp), 
&cmng_input); 2773 m_rs_vn.vn_counter.rate = 2774 cmng_input.vnic_max_rate[BP_VN(bp)]; 2775 m_rs_vn.vn_counter.quota = 2776 (m_rs_vn.vn_counter.rate * 2777 RS_PERIODIC_TIMEOUT_USEC) / 8; 2778 2779 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); 2780 2781 /* read relevant values from mf_cfg struct in shmem */ 2782 vif_id = 2783 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2784 FUNC_MF_CFG_E1HOV_TAG_MASK) >> 2785 FUNC_MF_CFG_E1HOV_TAG_SHIFT; 2786 vlan_val = 2787 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2788 FUNC_MF_CFG_AFEX_VLAN_MASK) >> 2789 FUNC_MF_CFG_AFEX_VLAN_SHIFT; 2790 vlan_prio = (mf_config & 2791 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 2792 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; 2793 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); 2794 vlan_mode = 2795 (MF_CFG_RD(bp, 2796 func_mf_config[func].afex_config) & 2797 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 2798 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; 2799 allowed_prio = 2800 (MF_CFG_RD(bp, 2801 func_mf_config[func].afex_config) & 2802 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 2803 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT; 2804 2805 /* send ramrod to FW, return in case of failure */ 2806 if (bnx2x_afex_func_update(bp, vif_id, vlan_val, 2807 allowed_prio)) 2808 return; 2809 2810 bp->afex_def_vlan_tag = vlan_val; 2811 bp->afex_vlan_mode = vlan_mode; 2812 } else { 2813 /* notify link down because BP->flags is disabled */ 2814 bnx2x_link_report(bp); 2815 2816 /* send INVALID VIF ramrod to FW */ 2817 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); 2818 2819 /* Reset the default afex VLAN */ 2820 bp->afex_def_vlan_tag = -1; 2821 } 2822 } 2823 } 2824 2825 static void bnx2x_pmf_update(struct bnx2x *bp) 2826 { 2827 int port = BP_PORT(bp); 2828 u32 val; 2829 2830 bp->port.pmf = 1; 2831 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); 2832 2833 /* 2834 * We need the mb() to ensure the ordering between the writing to 2835 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). 2836 */ 2837 smp_mb(); 2838 2839 /* queue a periodic task */ 2840 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2841 2842 bnx2x_dcbx_pmf_update(bp); 2843 2844 /* enable nig attention */ 2845 val = (0xff0f | (1 << (BP_VN(bp) + 4))); 2846 if (bp->common.int_block == INT_BLOCK_HC) { 2847 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2848 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2849 } else if (!CHIP_IS_E1x(bp)) { 2850 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 2851 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 2852 } 2853 2854 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 2855 } 2856 2857 /* end of Link */ 2858 2859 /* slow path */ 2860 2861 /* 2862 * General service functions 2863 */ 2864 2865 /* send the MCP a request, block until there is a reply */ 2866 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 2867 { 2868 int mb_idx = BP_FW_MB_IDX(bp); 2869 u32 seq; 2870 u32 rc = 0; 2871 u32 cnt = 1; 2872 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2873 2874 mutex_lock(&bp->fw_mb_mutex); 2875 seq = ++bp->fw_seq; 2876 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); 2877 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); 2878 2879 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", 2880 (command | seq), param); 2881 2882 do { 2883 /* let the FW do it's magic ... 
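		 * (i.e. poll the mailbox header until the FW echoes back the
		 * sequence number we wrote - see the while condition below)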
		 */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

static void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (CHIP_IS_E1x(bp)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */
static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    bool zero_stats)
{
	unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
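	 *
	 * (hence the tx-only setup path is expected to call this helper
	 * with zero_stats == false, while a normal queue setup passes
	 * true - see bnx2x_get_q_flags() below)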
2961 */ 2962 2963 __set_bit(BNX2X_Q_FLG_STATS, &flags); 2964 if (zero_stats) 2965 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 2966 2967 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); 2968 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); 2969 2970 #ifdef BNX2X_STOP_ON_ERROR 2971 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); 2972 #endif 2973 2974 return flags; 2975 } 2976 2977 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 2978 struct bnx2x_fastpath *fp, 2979 bool leading) 2980 { 2981 unsigned long flags = 0; 2982 2983 /* calculate other queue flags */ 2984 if (IS_MF_SD(bp)) 2985 __set_bit(BNX2X_Q_FLG_OV, &flags); 2986 2987 if (IS_FCOE_FP(fp)) { 2988 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 2989 /* For FCoE - force usage of default priority (for afex) */ 2990 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); 2991 } 2992 2993 if (!fp->disable_tpa) { 2994 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2995 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 2996 if (fp->mode == TPA_MODE_GRO) 2997 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); 2998 } 2999 3000 if (leading) { 3001 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); 3002 __set_bit(BNX2X_Q_FLG_MCAST, &flags); 3003 } 3004 3005 /* Always set HW VLAN stripping */ 3006 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 3007 3008 /* configure silent vlan removal */ 3009 if (IS_MF_AFEX(bp)) 3010 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); 3011 3012 return flags | bnx2x_get_common_flags(bp, fp, true); 3013 } 3014 3015 static void bnx2x_pf_q_prep_general(struct bnx2x *bp, 3016 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, 3017 u8 cos) 3018 { 3019 gen_init->stat_id = bnx2x_stats_id(fp); 3020 gen_init->spcl_id = fp->cl_id; 3021 3022 /* Always use mini-jumbo MTU for FCoE L2 ring */ 3023 if (IS_FCOE_FP(fp)) 3024 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 3025 else 3026 gen_init->mtu = bp->dev->mtu; 3027 3028 gen_init->cos = cos; 3029 } 3030 3031 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, 3032 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, 3033 struct bnx2x_rxq_setup_params *rxq_init) 3034 { 3035 u8 max_sge = 0; 3036 u16 sge_sz = 0; 3037 u16 tpa_agg_size = 0; 3038 3039 if (!fp->disable_tpa) { 3040 pause->sge_th_lo = SGE_TH_LO(bp); 3041 pause->sge_th_hi = SGE_TH_HI(bp); 3042 3043 /* validate SGE ring has enough to cross high threshold */ 3044 WARN_ON(bp->dropless_fc && 3045 pause->sge_th_hi + FW_PREFETCH_CNT > 3046 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 3047 3048 tpa_agg_size = TPA_AGG_SIZE; 3049 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> 3050 SGE_PAGE_SHIFT; 3051 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 3052 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; 3053 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff); 3054 } 3055 3056 /* pause - not for e1 */ 3057 if (!CHIP_IS_E1(bp)) { 3058 pause->bd_th_lo = BD_TH_LO(bp); 3059 pause->bd_th_hi = BD_TH_HI(bp); 3060 3061 pause->rcq_th_lo = RCQ_TH_LO(bp); 3062 pause->rcq_th_hi = RCQ_TH_HI(bp); 3063 /* 3064 * validate that rings have enough entries to cross 3065 * high thresholds 3066 */ 3067 WARN_ON(bp->dropless_fc && 3068 pause->bd_th_hi + FW_PREFETCH_CNT > 3069 bp->rx_ring_size); 3070 WARN_ON(bp->dropless_fc && 3071 pause->rcq_th_hi + FW_PREFETCH_CNT > 3072 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 3073 3074 pause->pri_map = 1; 3075 } 3076 3077 /* rxq setup */ 3078 rxq_init->dscr_map = fp->rx_desc_mapping; 3079 rxq_init->sge_map = fp->rx_sge_mapping; 3080 rxq_init->rcq_map = fp->rx_comp_mapping; 3081 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 3082 3083 /* This should be a maximum number of data bytes that 
may be
	 * placed on the BD (not including paddings).
	 */
	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
			   BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;

	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->rss_engine_id = BP_FUNC(bp);
	rxq_init->mcast_engine_id = BP_FUNC(bp);

	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
	if (IS_MF_AFEX(bp)) {
		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
		rxq_init->silent_removal_mask = VLAN_VID_MASK;
	}
}

static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
	u8 cos)
{
	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;

	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);

	if (IS_FCOE_FP(fp)) {
		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
	}
}

static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* This flag is relevant for E1x only.
	 * E2 doesn't have a TPA configuration in a function level.
	 */
	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link yet, so the initial link rate is set to
	 * 10 Gbps. When the link comes up, the congestion management
	 * values are re-calculated according to the actual link rate.
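	 *
	 * (for instance, bnx2x_calc_vn_max() above scales an SI-mode MAX
	 * bandwidth percentage against exactly this line_speed value, so
	 * the 10 Gbps placeholder yields sane initial quotas until the
	 * first link interrupt recomputes them)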
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* init Event Queue - PCI bus guarantees correct endianness */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bnx2x_tx_disable(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
	struct eth_stats_info *ether_stat =
		&bp->slowpath->drv_info_to_mcp.ether_stat;
	struct bnx2x_vlan_mac_obj *mac_obj =
		&bp->sp_objs->mac_obj;
	int i;

	strlcpy(ether_stat->version, DRV_MODULE_VERSION,
		ETH_STAT_INFO_VERSION_LEN);

	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field in ether_stat struct. The base address is offset by 2
	 * bytes to account for the field being 8 bytes but a mac address is
	 * only 6 bytes. Likewise, the stride for the get_n_elements function is
	 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
	 * allocated by the ether_stat struct, so the macs will land in their
	 * proper positions.
	 */
	for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
		memset(ether_stat->mac_local + i, 0,
		       sizeof(ether_stat->mac_local[0]));
	mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
				DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
				ether_stat->mac_local + MAC_PAD, MAC_PAD,
				ETH_ALEN);
	ether_stat->mtu_size = bp->dev->mtu;
	if (bp->dev->features & NETIF_F_RXCSUM)
		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
	if (bp->dev->features & NETIF_F_TSO)
		ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
	ether_stat->feature_flags |= bp->common.boot_mode;

	ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;

	ether_stat->txq_size = bp->tx_ring_size;
	ether_stat->rxq_size = bp->rx_ring_size;
}

static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
{
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct fcoe_stats_info *fcoe_stat =
		&bp->slowpath->drv_info_to_mcp.fcoe_stat;

	if (!CNIC_LOADED(bp))
		return;

	memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);

	fcoe_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];

	/* insert FCoE stats from ramrod response */
	if (!NO_FCOE(bp)) {
		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3284 tstorm_queue_statistics; 3285 3286 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3287 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3288 xstorm_queue_statistics; 3289 3290 struct fcoe_statistics_params *fw_fcoe_stat = 3291 &bp->fw_stats_data->fcoe; 3292 3293 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, 3294 fcoe_stat->rx_bytes_lo, 3295 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 3296 3297 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3298 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 3299 fcoe_stat->rx_bytes_lo, 3300 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 3301 3302 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3303 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 3304 fcoe_stat->rx_bytes_lo, 3305 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 3306 3307 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3308 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 3309 fcoe_stat->rx_bytes_lo, 3310 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 3311 3312 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3313 fcoe_stat->rx_frames_lo, 3314 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 3315 3316 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3317 fcoe_stat->rx_frames_lo, 3318 fcoe_q_tstorm_stats->rcv_ucast_pkts); 3319 3320 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3321 fcoe_stat->rx_frames_lo, 3322 fcoe_q_tstorm_stats->rcv_bcast_pkts); 3323 3324 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3325 fcoe_stat->rx_frames_lo, 3326 fcoe_q_tstorm_stats->rcv_mcast_pkts); 3327 3328 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, 3329 fcoe_stat->tx_bytes_lo, 3330 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); 3331 3332 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3333 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, 3334 fcoe_stat->tx_bytes_lo, 3335 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); 3336 3337 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3338 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, 3339 fcoe_stat->tx_bytes_lo, 3340 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); 3341 3342 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3343 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, 3344 fcoe_stat->tx_bytes_lo, 3345 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); 3346 3347 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3348 fcoe_stat->tx_frames_lo, 3349 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); 3350 3351 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3352 fcoe_stat->tx_frames_lo, 3353 fcoe_q_xstorm_stats->ucast_pkts_sent); 3354 3355 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3356 fcoe_stat->tx_frames_lo, 3357 fcoe_q_xstorm_stats->bcast_pkts_sent); 3358 3359 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3360 fcoe_stat->tx_frames_lo, 3361 fcoe_q_xstorm_stats->mcast_pkts_sent); 3362 } 3363 3364 /* ask L5 driver to add data to the struct */ 3365 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3366 } 3367 3368 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3369 { 3370 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3371 struct iscsi_stats_info *iscsi_stat = 3372 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3373 3374 if (!CNIC_LOADED(bp)) 3375 return; 3376 3377 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, 3378 ETH_ALEN); 3379 3380 iscsi_stat->qos_priority = 3381 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3382 3383 /* ask L5 driver to add data to the struct */ 3384 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3385 } 3386 3387 /* called due to MCP event (on pmf): 3388 * reread new bandwidth configuration 3389 * configure FW 3390 * notify others function about the change 3391 */ 3392 static void bnx2x_config_mf_bw(struct bnx2x *bp) 3393 { 3394 if (bp->link_vars.link_up) { 3395 bnx2x_cmng_fns_init(bp, true, 
CMNG_FNS_MINMAX); 3396 bnx2x_link_sync_notify(bp); 3397 } 3398 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3399 } 3400 3401 static void bnx2x_set_mf_bw(struct bnx2x *bp) 3402 { 3403 bnx2x_config_mf_bw(bp); 3404 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3405 } 3406 3407 static void bnx2x_handle_eee_event(struct bnx2x *bp) 3408 { 3409 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); 3410 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3411 } 3412 3413 static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3414 { 3415 enum drv_info_opcode op_code; 3416 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3417 3418 /* if drv_info version supported by MFW doesn't match - send NACK */ 3419 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3420 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3421 return; 3422 } 3423 3424 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3425 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3426 3427 memset(&bp->slowpath->drv_info_to_mcp, 0, 3428 sizeof(union drv_info_to_mcp)); 3429 3430 switch (op_code) { 3431 case ETH_STATS_OPCODE: 3432 bnx2x_drv_info_ether_stat(bp); 3433 break; 3434 case FCOE_STATS_OPCODE: 3435 bnx2x_drv_info_fcoe_stat(bp); 3436 break; 3437 case ISCSI_STATS_OPCODE: 3438 bnx2x_drv_info_iscsi_stat(bp); 3439 break; 3440 default: 3441 /* if op code isn't supported - send NACK */ 3442 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3443 return; 3444 } 3445 3446 /* if we got drv_info attn from MFW then these fields are defined in 3447 * shmem2 for sure 3448 */ 3449 SHMEM2_WR(bp, drv_info_host_addr_lo, 3450 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3451 SHMEM2_WR(bp, drv_info_host_addr_hi, 3452 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3453 3454 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3455 } 3456 3457 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 3458 { 3459 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 3460 3461 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3462 3463 /* 3464 * This is the only place besides the function initialization 3465 * where the bp->flags can change so it is done without any 3466 * locks 3467 */ 3468 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3469 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3470 bp->flags |= MF_FUNC_DIS; 3471 3472 bnx2x_e1h_disable(bp); 3473 } else { 3474 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3475 bp->flags &= ~MF_FUNC_DIS; 3476 3477 bnx2x_e1h_enable(bp); 3478 } 3479 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3480 } 3481 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3482 bnx2x_config_mf_bw(bp); 3483 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3484 } 3485 3486 /* Report results to MCP */ 3487 if (dcc_event) 3488 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); 3489 else 3490 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); 3491 } 3492 3493 /* must be called under the spq lock */ 3494 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3495 { 3496 struct eth_spe *next_spe = bp->spq_prod_bd; 3497 3498 if (bp->spq_prod_bd == bp->spq_last_bd) { 3499 bp->spq_prod_bd = bp->spq; 3500 bp->spq_prod_idx = 0; 3501 DP(BNX2X_MSG_SP, "end of spq\n"); 3502 } else { 3503 bp->spq_prod_bd++; 3504 bp->spq_prod_idx++; 3505 } 3506 return next_spe; 3507 } 3508 3509 /* must be called under the spq lock */ 3510 static void bnx2x_sp_prod_update(struct bnx2x *bp) 3511 { 3512 int func = BP_FUNC(bp); 3513 3514 /* 3515 * Make sure that BD data is updated before writing the producer: 3516 * BD data 
	 * is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure the ordering.
	 */
	mb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}

/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd: command to check
 * @cmd_type: command type
 */
static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
{
	if ((cmd_type == NONE_CONNECTION_TYPE) ||
	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
		return true;
	else
		return false;
}

/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp: driver handle
 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid: SW CID the command is related to
 * @data_hi: command private data address (high 32 bits)
 * @data_lo: command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int cmd_type)
{
	struct eth_spe *spe;
	u16 type;
	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post SP when there is panic\n");
		return -EIO;
	}
#endif

	spin_lock_bh(&bp->spq_lock);

	if (common) {
		if (!atomic_read(&bp->eq_spq_left)) {
			BNX2X_ERR("BUG! EQ ring full!\n");
			spin_unlock_bh(&bp->spq_lock);
			bnx2x_panic();
			return -EBUSY;
		}
	} else if (!atomic_read(&bp->cq_spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs the port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
	if (common)
		atomic_dec(&bp->eq_spq_left);
	else
		atomic_dec(&bp->cq_spq_left);

	DP(BNX2X_MSG_SP,
	   "SPQE[%x] (%x:%x) (cmd, common?)
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", 3619 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 3620 (u32)(U64_LO(bp->spq_mapping) + 3621 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, 3622 HW_CID(bp, cid), data_hi, data_lo, type, 3623 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); 3624 3625 bnx2x_sp_prod_update(bp); 3626 spin_unlock_bh(&bp->spq_lock); 3627 return 0; 3628 } 3629 3630 /* acquire split MCP access lock register */ 3631 static int bnx2x_acquire_alr(struct bnx2x *bp) 3632 { 3633 u32 j, val; 3634 int rc = 0; 3635 3636 might_sleep(); 3637 for (j = 0; j < 1000; j++) { 3638 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); 3639 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); 3640 if (val & MCPR_ACCESS_LOCK_LOCK) 3641 break; 3642 3643 usleep_range(5000, 10000); 3644 } 3645 if (!(val & MCPR_ACCESS_LOCK_LOCK)) { 3646 BNX2X_ERR("Cannot acquire MCP access lock register\n"); 3647 rc = -EBUSY; 3648 } 3649 3650 return rc; 3651 } 3652 3653 /* release split MCP access lock register */ 3654 static void bnx2x_release_alr(struct bnx2x *bp) 3655 { 3656 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 3657 } 3658 3659 #define BNX2X_DEF_SB_ATT_IDX 0x0001 3660 #define BNX2X_DEF_SB_IDX 0x0002 3661 3662 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 3663 { 3664 struct host_sp_status_block *def_sb = bp->def_status_blk; 3665 u16 rc = 0; 3666 3667 barrier(); /* status block is written to by the chip */ 3668 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 3669 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 3670 rc |= BNX2X_DEF_SB_ATT_IDX; 3671 } 3672 3673 if (bp->def_idx != def_sb->sp_sb.running_index) { 3674 bp->def_idx = def_sb->sp_sb.running_index; 3675 rc |= BNX2X_DEF_SB_IDX; 3676 } 3677 3678 /* Do not reorder: indices reading should complete before handling */ 3679 barrier(); 3680 return rc; 3681 } 3682 3683 /* 3684 * slow path service functions 3685 */ 3686 3687 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 3688 { 3689 int port = BP_PORT(bp); 3690 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 3691 MISC_REG_AEU_MASK_ATTN_FUNC_0; 3692 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 3693 NIG_REG_MASK_INTERRUPT_PORT0; 3694 u32 aeu_mask; 3695 u32 nig_mask = 0; 3696 u32 reg_addr; 3697 3698 if (bp->attn_state & asserted) 3699 BNX2X_ERR("IGU ERROR\n"); 3700 3701 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3702 aeu_mask = REG_RD(bp, aeu_addr); 3703 3704 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 3705 aeu_mask, asserted); 3706 aeu_mask &= ~(asserted & 0x3ff); 3707 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 3708 3709 REG_WR(bp, aeu_addr, aeu_mask); 3710 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3711 3712 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 3713 bp->attn_state |= asserted; 3714 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 3715 3716 if (asserted & ATTN_HARD_WIRED_MASK) { 3717 if (asserted & ATTN_NIG_FOR_FUNC) { 3718 3719 bnx2x_acquire_phy_lock(bp); 3720 3721 /* save nig interrupt mask */ 3722 nig_mask = REG_RD(bp, nig_int_mask_addr); 3723 3724 /* If nig_mask is not set, no need to call the update 3725 * function. 3726 */ 3727 if (nig_mask) { 3728 REG_WR(bp, nig_int_mask_addr, 0); 3729 3730 bnx2x_link_attn(bp); 3731 } 3732 3733 /* handle unicore attn? 
*/ 3734 } 3735 if (asserted & ATTN_SW_TIMER_4_FUNC) 3736 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); 3737 3738 if (asserted & GPIO_2_FUNC) 3739 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); 3740 3741 if (asserted & GPIO_3_FUNC) 3742 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); 3743 3744 if (asserted & GPIO_4_FUNC) 3745 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); 3746 3747 if (port == 0) { 3748 if (asserted & ATTN_GENERAL_ATTN_1) { 3749 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); 3750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 3751 } 3752 if (asserted & ATTN_GENERAL_ATTN_2) { 3753 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); 3754 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 3755 } 3756 if (asserted & ATTN_GENERAL_ATTN_3) { 3757 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); 3758 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 3759 } 3760 } else { 3761 if (asserted & ATTN_GENERAL_ATTN_4) { 3762 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); 3763 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 3764 } 3765 if (asserted & ATTN_GENERAL_ATTN_5) { 3766 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); 3767 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 3768 } 3769 if (asserted & ATTN_GENERAL_ATTN_6) { 3770 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); 3771 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 3772 } 3773 } 3774 3775 } /* if hardwired */ 3776 3777 if (bp->common.int_block == INT_BLOCK_HC) 3778 reg_addr = (HC_REG_COMMAND_REG + port*32 + 3779 COMMAND_REG_ATTN_BITS_SET); 3780 else 3781 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 3782 3783 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, 3784 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 3785 REG_WR(bp, reg_addr, asserted); 3786 3787 /* now set back the mask */ 3788 if (asserted & ATTN_NIG_FOR_FUNC) { 3789 /* Verify that IGU ack through BAR was written before restoring 3790 * NIG mask. This loop should exit after 2-3 iterations max. 3791 */ 3792 if (bp->common.int_block != INT_BLOCK_HC) { 3793 u32 cnt = 0, igu_acked; 3794 do { 3795 igu_acked = REG_RD(bp, 3796 IGU_REG_ATTENTION_ACK_BITS); 3797 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 3798 (++cnt < MAX_IGU_ATTN_ACK_TO)); 3799 if (!igu_acked) 3800 DP(NETIF_MSG_HW, 3801 "Failed to verify IGU ack on time\n"); 3802 barrier(); 3803 } 3804 REG_WR(bp, nig_int_mask_addr, nig_mask); 3805 bnx2x_release_phy_lock(bp); 3806 } 3807 } 3808 3809 static void bnx2x_fan_failure(struct bnx2x *bp) 3810 { 3811 int port = BP_PORT(bp); 3812 u32 ext_phy_config; 3813 /* mark the failure */ 3814 ext_phy_config = 3815 SHMEM_RD(bp, 3816 dev_info.port_hw_config[port].external_phy_config); 3817 3818 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 3819 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 3820 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, 3821 ext_phy_config); 3822 3823 /* log the failure */ 3824 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n" 3825 "Please contact OEM Support for assistance\n"); 3826 3827 /* Schedule device reset (unload). 3828 * Some boards consume enough power while the driver is up to 3829 * overheat if the fan fails. 
3830 */ 3831 smp_mb__before_clear_bit(); 3832 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); 3833 smp_mb__after_clear_bit(); 3834 schedule_delayed_work(&bp->sp_rtnl_task, 0); 3835 } 3836 3837 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 3838 { 3839 int port = BP_PORT(bp); 3840 int reg_offset; 3841 u32 val; 3842 3843 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 3844 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 3845 3846 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 3847 3848 val = REG_RD(bp, reg_offset); 3849 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 3850 REG_WR(bp, reg_offset, val); 3851 3852 BNX2X_ERR("SPIO5 hw attention\n"); 3853 3854 /* Fan failure attention */ 3855 bnx2x_hw_reset_phy(&bp->link_params); 3856 bnx2x_fan_failure(bp); 3857 } 3858 3859 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 3860 bnx2x_acquire_phy_lock(bp); 3861 bnx2x_handle_module_detect_int(&bp->link_params); 3862 bnx2x_release_phy_lock(bp); 3863 } 3864 3865 if (attn & HW_INTERRUT_ASSERT_SET_0) { 3866 3867 val = REG_RD(bp, reg_offset); 3868 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 3869 REG_WR(bp, reg_offset, val); 3870 3871 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 3872 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 3873 bnx2x_panic(); 3874 } 3875 } 3876 3877 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 3878 { 3879 u32 val; 3880 3881 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 3882 3883 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 3884 BNX2X_ERR("DB hw attention 0x%x\n", val); 3885 /* DORQ discard attention */ 3886 if (val & 0x2) 3887 BNX2X_ERR("FATAL error from DORQ\n"); 3888 } 3889 3890 if (attn & HW_INTERRUT_ASSERT_SET_1) { 3891 3892 int port = BP_PORT(bp); 3893 int reg_offset; 3894 3895 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 3896 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 3897 3898 val = REG_RD(bp, reg_offset); 3899 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 3900 REG_WR(bp, reg_offset, val); 3901 3902 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 3903 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 3904 bnx2x_panic(); 3905 } 3906 } 3907 3908 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 3909 { 3910 u32 val; 3911 3912 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3913 3914 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 3915 BNX2X_ERR("CFC hw attention 0x%x\n", val); 3916 /* CFC error attention */ 3917 if (val & 0x2) 3918 BNX2X_ERR("FATAL error from CFC\n"); 3919 } 3920 3921 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3922 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 3923 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 3924 /* RQ_USDMDP_FIFO_OVERFLOW */ 3925 if (val & 0x18000) 3926 BNX2X_ERR("FATAL error from PXP\n"); 3927 3928 if (!CHIP_IS_E1x(bp)) { 3929 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 3930 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 3931 } 3932 } 3933 3934 if (attn & HW_INTERRUT_ASSERT_SET_2) { 3935 3936 int port = BP_PORT(bp); 3937 int reg_offset; 3938 3939 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 3940 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 3941 3942 val = REG_RD(bp, reg_offset); 3943 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 3944 REG_WR(bp, reg_offset, val); 3945 3946 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 3947 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 3948 bnx2x_panic(); 3949 } 3950 } 3951 3952 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 3953 { 3954 u32 val; 3955 3956 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 3957 3958 if (attn & BNX2X_PMF_LINK_ASSERT) { 3959 int func = BP_FUNC(bp); 3960 3961 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3962 bnx2x_read_mf_cfg(bp); 3963 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 3964 func_mf_config[BP_ABS_FUNC(bp)].config); 3965 val = SHMEM_RD(bp, 3966 func_mb[BP_FW_MB_IDX(bp)].drv_status); 3967 if (val & DRV_STATUS_DCC_EVENT_MASK) 3968 bnx2x_dcc_event(bp, 3969 (val & DRV_STATUS_DCC_EVENT_MASK)); 3970 3971 if (val & DRV_STATUS_SET_MF_BW) 3972 bnx2x_set_mf_bw(bp); 3973 3974 if (val & DRV_STATUS_DRV_INFO_REQ) 3975 bnx2x_handle_drv_info_req(bp); 3976 3977 if (val & DRV_STATUS_VF_DISABLED) 3978 bnx2x_vf_handle_flr_event(bp); 3979 3980 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3981 bnx2x_pmf_update(bp); 3982 3983 if (bp->port.pmf && 3984 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 3985 bp->dcbx_enabled > 0) 3986 /* start dcbx state machine */ 3987 bnx2x_dcbx_set_params(bp, 3988 BNX2X_DCBX_STATE_NEG_RECEIVED); 3989 if (val & DRV_STATUS_AFEX_EVENT_MASK) 3990 bnx2x_handle_afex_cmd(bp, 3991 val & DRV_STATUS_AFEX_EVENT_MASK); 3992 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 3993 bnx2x_handle_eee_event(bp); 3994 if (bp->link_vars.periodic_flags & 3995 PERIODIC_FLAGS_LINK_EVENT) { 3996 /* sync with link */ 3997 bnx2x_acquire_phy_lock(bp); 3998 bp->link_vars.periodic_flags &= 3999 ~PERIODIC_FLAGS_LINK_EVENT; 4000 bnx2x_release_phy_lock(bp); 4001 if (IS_MF(bp)) 4002 bnx2x_link_sync_notify(bp); 4003 bnx2x_link_report(bp); 4004 } 4005 /* Always call it here: bnx2x_link_report() will 4006 * prevent the link indication duplication. 4007 */ 4008 bnx2x__link_status_update(bp); 4009 } else if (attn & BNX2X_MC_ASSERT_BITS) { 4010 4011 BNX2X_ERR("MC assert!\n"); 4012 bnx2x_mc_assert(bp); 4013 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); 4014 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); 4015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); 4016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); 4017 bnx2x_panic(); 4018 4019 } else if (attn & BNX2X_MCP_ASSERT) { 4020 4021 BNX2X_ERR("MCP assert!\n"); 4022 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); 4023 bnx2x_fw_dump(bp); 4024 4025 } else 4026 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); 4027 } 4028 4029 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 4030 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 4031 if (attn & BNX2X_GRC_TIMEOUT) { 4032 val = CHIP_IS_E1(bp) ? 0 : 4033 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); 4034 BNX2X_ERR("GRC time-out 0x%08x\n", val); 4035 } 4036 if (attn & BNX2X_GRC_RSV) { 4037 val = CHIP_IS_E1(bp) ? 0 : 4038 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); 4039 BNX2X_ERR("GRC reserved 0x%08x\n", val); 4040 } 4041 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 4042 } 4043 } 4044 4045 /* 4046 * Bits map: 4047 * 0-7 - Engine0 load counter. 4048 * 8-15 - Engine1 load counter. 4049 * 16 - Engine0 RESET_IN_PROGRESS bit. 4050 * 17 - Engine1 RESET_IN_PROGRESS bit. 4051 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function 4052 * on the engine 4053 * 19 - Engine1 ONE_IS_LOADED. 
4054 * 20 - Chip reset flow bit. When set, a non-leader must wait for both 4055 * engines' leaders to complete (check both RESET_IN_PROGRESS bits, not 4056 * just the one belonging to its engine). 4057 * 4058 */ 4059 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 4060 4061 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff 4062 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0 4063 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 4064 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8 4065 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 4066 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 4067 #define BNX2X_GLOBAL_RESET_BIT 0x00040000 4068 4069 /* 4070 * Set the GLOBAL_RESET bit. 4071 * 4072 * Should be run under rtnl lock 4073 */ 4074 void bnx2x_set_reset_global(struct bnx2x *bp) 4075 { 4076 u32 val; 4077 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4078 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4079 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 4080 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4081 } 4082 4083 /* 4084 * Clear the GLOBAL_RESET bit. 4085 * 4086 * Should be run under rtnl lock 4087 */ 4088 static void bnx2x_clear_reset_global(struct bnx2x *bp) 4089 { 4090 u32 val; 4091 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4092 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4093 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 4094 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4095 } 4096 4097 /* 4098 * Checks the GLOBAL_RESET bit. 4099 * 4100 * should be run under rtnl lock 4101 */ 4102 static bool bnx2x_reset_is_global(struct bnx2x *bp) 4103 { 4104 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4105 4106 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 4107 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; 4108 } 4109 4110 /* 4111 * Clear RESET_IN_PROGRESS bit for the current engine. 4112 * 4113 * Should be run under rtnl lock 4114 */ 4115 static void bnx2x_set_reset_done(struct bnx2x *bp) 4116 { 4117 u32 val; 4118 u32 bit = BP_PATH(bp) ? 4119 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4120 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4121 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4122 4123 /* Clear the bit */ 4124 val &= ~bit; 4125 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4126 4127 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4128 } 4129 4130 /* 4131 * Set RESET_IN_PROGRESS for the current engine. 4132 * 4133 * should be run under rtnl lock 4134 */ 4135 void bnx2x_set_reset_in_progress(struct bnx2x *bp) 4136 { 4137 u32 val; 4138 u32 bit = BP_PATH(bp) ? 4139 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4140 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4141 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4142 4143 /* Set the bit */ 4144 val |= bit; 4145 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4146 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4147 } 4148 4149 /* 4150 * Checks the RESET_IN_PROGRESS bit for the given engine. 4151 * should be run under rtnl lock 4152 */ 4153 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) 4154 { 4155 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4156 u32 bit = engine ? 4157 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4158 4159 /* return false if bit is set */ 4160 return (val & bit) ? false : true; 4161 } 4162 4163 /* 4164 * set pf load for the current pf. 
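* The per-path load byte holds one bit per PF. Illustrative example (values assumed, not taken from a real system): on path 0 (mask 0x000000ff, shift 0) with pf_num 2, a register value of 0x00010001 (PF0 loaded, engine0 RESET_IN_PROGRESS) becomes 0x00010005; only the load byte is read-modify-written, the RESET/GLOBAL bits above it are untouched.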
4165 * 4166 * should be run under rtnl lock 4167 */ 4168 void bnx2x_set_pf_load(struct bnx2x *bp) 4169 { 4170 u32 val1, val; 4171 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4172 BNX2X_PATH0_LOAD_CNT_MASK; 4173 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4174 BNX2X_PATH0_LOAD_CNT_SHIFT; 4175 4176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4177 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4178 4179 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 4180 4181 /* get the current counter value */ 4182 val1 = (val & mask) >> shift; 4183 4184 /* set bit of that PF */ 4185 val1 |= (1 << bp->pf_num); 4186 4187 /* clear the old value */ 4188 val &= ~mask; 4189 4190 /* set the new one */ 4191 val |= ((val1 << shift) & mask); 4192 4193 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4194 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4195 } 4196 4197 /** 4198 * bnx2x_clear_pf_load - clear pf load mark 4199 * 4200 * @bp: driver handle 4201 * 4202 * Should be run under rtnl lock. 4203 * Decrements the load counter for the current engine. Returns 4204 * whether other functions are still loaded 4205 */ 4206 bool bnx2x_clear_pf_load(struct bnx2x *bp) 4207 { 4208 u32 val1, val; 4209 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4210 BNX2X_PATH0_LOAD_CNT_MASK; 4211 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4212 BNX2X_PATH0_LOAD_CNT_SHIFT; 4213 4214 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4215 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4216 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 4217 4218 /* get the current counter value */ 4219 val1 = (val & mask) >> shift; 4220 4221 /* clear bit of that PF */ 4222 val1 &= ~(1 << bp->pf_num); 4223 4224 /* clear the old value */ 4225 val &= ~mask; 4226 4227 /* set the new one */ 4228 val |= ((val1 << shift) & mask); 4229 4230 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4231 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4232 return val1 != 0; 4233 } 4234 4235 /* 4236 * Read the load status for the current engine. 4237 * 4238 * should be run under rtnl lock 4239 */ 4240 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 4241 { 4242 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 4243 BNX2X_PATH0_LOAD_CNT_MASK); 4244 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4245 BNX2X_PATH0_LOAD_CNT_SHIFT); 4246 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4247 4248 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4249 4250 val = (val & mask) >> shift; 4251 4252 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4253 engine, val); 4254 4255 return val != 0; 4256 } 4257 4258 static void _print_parity(struct bnx2x *bp, u32 reg) 4259 { 4260 pr_cont(" [0x%08x] ", REG_RD(bp, reg)); 4261 } 4262 4263 static void _print_next_block(int idx, const char *blk) 4264 { 4265 pr_cont("%s%s", idx ? 
", " : "", blk); 4266 } 4267 4268 static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4269 int par_num, bool print) 4270 { 4271 int i = 0; 4272 u32 cur_bit = 0; 4273 for (i = 0; sig; i++) { 4274 cur_bit = ((u32)0x1 << i); 4275 if (sig & cur_bit) { 4276 switch (cur_bit) { 4277 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4278 if (print) { 4279 _print_next_block(par_num++, "BRB"); 4280 _print_parity(bp, 4281 BRB1_REG_BRB1_PRTY_STS); 4282 } 4283 break; 4284 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4285 if (print) { 4286 _print_next_block(par_num++, "PARSER"); 4287 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4288 } 4289 break; 4290 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4291 if (print) { 4292 _print_next_block(par_num++, "TSDM"); 4293 _print_parity(bp, 4294 TSDM_REG_TSDM_PRTY_STS); 4295 } 4296 break; 4297 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4298 if (print) { 4299 _print_next_block(par_num++, 4300 "SEARCHER"); 4301 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4302 } 4303 break; 4304 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4305 if (print) { 4306 _print_next_block(par_num++, "TCM"); 4307 _print_parity(bp, 4308 TCM_REG_TCM_PRTY_STS); 4309 } 4310 break; 4311 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4312 if (print) { 4313 _print_next_block(par_num++, "TSEMI"); 4314 _print_parity(bp, 4315 TSEM_REG_TSEM_PRTY_STS_0); 4316 _print_parity(bp, 4317 TSEM_REG_TSEM_PRTY_STS_1); 4318 } 4319 break; 4320 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4321 if (print) { 4322 _print_next_block(par_num++, "XPB"); 4323 _print_parity(bp, GRCBASE_XPB + 4324 PB_REG_PB_PRTY_STS); 4325 } 4326 break; 4327 } 4328 4329 /* Clear the bit */ 4330 sig &= ~cur_bit; 4331 } 4332 } 4333 4334 return par_num; 4335 } 4336 4337 static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4338 int par_num, bool *global, 4339 bool print) 4340 { 4341 int i = 0; 4342 u32 cur_bit = 0; 4343 for (i = 0; sig; i++) { 4344 cur_bit = ((u32)0x1 << i); 4345 if (sig & cur_bit) { 4346 switch (cur_bit) { 4347 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4348 if (print) { 4349 _print_next_block(par_num++, "PBF"); 4350 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4351 } 4352 break; 4353 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4354 if (print) { 4355 _print_next_block(par_num++, "QM"); 4356 _print_parity(bp, QM_REG_QM_PRTY_STS); 4357 } 4358 break; 4359 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4360 if (print) { 4361 _print_next_block(par_num++, "TM"); 4362 _print_parity(bp, TM_REG_TM_PRTY_STS); 4363 } 4364 break; 4365 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4366 if (print) { 4367 _print_next_block(par_num++, "XSDM"); 4368 _print_parity(bp, 4369 XSDM_REG_XSDM_PRTY_STS); 4370 } 4371 break; 4372 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4373 if (print) { 4374 _print_next_block(par_num++, "XCM"); 4375 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4376 } 4377 break; 4378 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4379 if (print) { 4380 _print_next_block(par_num++, "XSEMI"); 4381 _print_parity(bp, 4382 XSEM_REG_XSEM_PRTY_STS_0); 4383 _print_parity(bp, 4384 XSEM_REG_XSEM_PRTY_STS_1); 4385 } 4386 break; 4387 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4388 if (print) { 4389 _print_next_block(par_num++, 4390 "DOORBELLQ"); 4391 _print_parity(bp, 4392 DORQ_REG_DORQ_PRTY_STS); 4393 } 4394 break; 4395 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4396 if (print) { 4397 _print_next_block(par_num++, "NIG"); 4398 if (CHIP_IS_E1x(bp)) { 4399 _print_parity(bp, 4400 NIG_REG_NIG_PRTY_STS); 4401 } else { 4402 
_print_parity(bp, 4403 NIG_REG_NIG_PRTY_STS_0); 4404 _print_parity(bp, 4405 NIG_REG_NIG_PRTY_STS_1); 4406 } 4407 } 4408 break; 4409 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4410 if (print) 4411 _print_next_block(par_num++, 4412 "VAUX PCI CORE"); 4413 *global = true; 4414 break; 4415 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4416 if (print) { 4417 _print_next_block(par_num++, "DEBUG"); 4418 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4419 } 4420 break; 4421 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4422 if (print) { 4423 _print_next_block(par_num++, "USDM"); 4424 _print_parity(bp, 4425 USDM_REG_USDM_PRTY_STS); 4426 } 4427 break; 4428 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4429 if (print) { 4430 _print_next_block(par_num++, "UCM"); 4431 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4432 } 4433 break; 4434 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4435 if (print) { 4436 _print_next_block(par_num++, "USEMI"); 4437 _print_parity(bp, 4438 USEM_REG_USEM_PRTY_STS_0); 4439 _print_parity(bp, 4440 USEM_REG_USEM_PRTY_STS_1); 4441 } 4442 break; 4443 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4444 if (print) { 4445 _print_next_block(par_num++, "UPB"); 4446 _print_parity(bp, GRCBASE_UPB + 4447 PB_REG_PB_PRTY_STS); 4448 } 4449 break; 4450 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4451 if (print) { 4452 _print_next_block(par_num++, "CSDM"); 4453 _print_parity(bp, 4454 CSDM_REG_CSDM_PRTY_STS); 4455 } 4456 break; 4457 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4458 if (print) { 4459 _print_next_block(par_num++, "CCM"); 4460 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4461 } 4462 break; 4463 } 4464 4465 /* Clear the bit */ 4466 sig &= ~cur_bit; 4467 } 4468 } 4469 4470 return par_num; 4471 } 4472 4473 static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4474 int par_num, bool print) 4475 { 4476 int i = 0; 4477 u32 cur_bit = 0; 4478 for (i = 0; sig; i++) { 4479 cur_bit = ((u32)0x1 << i); 4480 if (sig & cur_bit) { 4481 switch (cur_bit) { 4482 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4483 if (print) { 4484 _print_next_block(par_num++, "CSEMI"); 4485 _print_parity(bp, 4486 CSEM_REG_CSEM_PRTY_STS_0); 4487 _print_parity(bp, 4488 CSEM_REG_CSEM_PRTY_STS_1); 4489 } 4490 break; 4491 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4492 if (print) { 4493 _print_next_block(par_num++, "PXP"); 4494 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4495 _print_parity(bp, 4496 PXP2_REG_PXP2_PRTY_STS_0); 4497 _print_parity(bp, 4498 PXP2_REG_PXP2_PRTY_STS_1); 4499 } 4500 break; 4501 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4502 if (print) 4503 _print_next_block(par_num++, 4504 "PXPPCICLOCKCLIENT"); 4505 break; 4506 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4507 if (print) { 4508 _print_next_block(par_num++, "CFC"); 4509 _print_parity(bp, 4510 CFC_REG_CFC_PRTY_STS); 4511 } 4512 break; 4513 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4514 if (print) { 4515 _print_next_block(par_num++, "CDU"); 4516 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4517 } 4518 break; 4519 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4520 if (print) { 4521 _print_next_block(par_num++, "DMAE"); 4522 _print_parity(bp, 4523 DMAE_REG_DMAE_PRTY_STS); 4524 } 4525 break; 4526 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4527 if (print) { 4528 _print_next_block(par_num++, "IGU"); 4529 if (CHIP_IS_E1x(bp)) 4530 _print_parity(bp, 4531 HC_REG_HC_PRTY_STS); 4532 else 4533 _print_parity(bp, 4534 IGU_REG_IGU_PRTY_STS); 4535 } 4536 break; 4537 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4538 if (print) { 4539 
_print_next_block(par_num++, "MISC"); 4540 _print_parity(bp, 4541 MISC_REG_MISC_PRTY_STS); 4542 } 4543 break; 4544 } 4545 4546 /* Clear the bit */ 4547 sig &= ~cur_bit; 4548 } 4549 } 4550 4551 return par_num; 4552 } 4553 4554 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4555 bool *global, bool print) 4556 { 4557 int i = 0; 4558 u32 cur_bit = 0; 4559 for (i = 0; sig; i++) { 4560 cur_bit = ((u32)0x1 << i); 4561 if (sig & cur_bit) { 4562 switch (cur_bit) { 4563 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4564 if (print) 4565 _print_next_block(par_num++, "MCP ROM"); 4566 *global = true; 4567 break; 4568 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4569 if (print) 4570 _print_next_block(par_num++, 4571 "MCP UMP RX"); 4572 *global = true; 4573 break; 4574 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4575 if (print) 4576 _print_next_block(par_num++, 4577 "MCP UMP TX"); 4578 *global = true; 4579 break; 4580 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4581 if (print) 4582 _print_next_block(par_num++, 4583 "MCP SCPAD"); 4584 *global = true; 4585 break; 4586 } 4587 4588 /* Clear the bit */ 4589 sig &= ~cur_bit; 4590 } 4591 } 4592 4593 return par_num; 4594 } 4595 4596 static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4597 int par_num, bool print) 4598 { 4599 int i = 0; 4600 u32 cur_bit = 0; 4601 for (i = 0; sig; i++) { 4602 cur_bit = ((u32)0x1 << i); 4603 if (sig & cur_bit) { 4604 switch (cur_bit) { 4605 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4606 if (print) { 4607 _print_next_block(par_num++, "PGLUE_B"); 4608 _print_parity(bp, 4609 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4610 } 4611 break; 4612 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4613 if (print) { 4614 _print_next_block(par_num++, "ATC"); 4615 _print_parity(bp, 4616 ATC_REG_ATC_PRTY_STS); 4617 } 4618 break; 4619 } 4620 4621 /* Clear the bit */ 4622 sig &= ~cur_bit; 4623 } 4624 } 4625 4626 return par_num; 4627 } 4628 4629 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4630 u32 *sig) 4631 { 4632 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4633 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4634 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4635 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4636 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4637 int par_num = 0; 4638 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4639 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4640 sig[0] & HW_PRTY_ASSERT_SET_0, 4641 sig[1] & HW_PRTY_ASSERT_SET_1, 4642 sig[2] & HW_PRTY_ASSERT_SET_2, 4643 sig[3] & HW_PRTY_ASSERT_SET_3, 4644 sig[4] & HW_PRTY_ASSERT_SET_4); 4645 if (print) 4646 netdev_err(bp->dev, 4647 "Parity errors detected in blocks: "); 4648 par_num = bnx2x_check_blocks_with_parity0(bp, 4649 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4650 par_num = bnx2x_check_blocks_with_parity1(bp, 4651 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4652 par_num = bnx2x_check_blocks_with_parity2(bp, 4653 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4654 par_num = bnx2x_check_blocks_with_parity3( 4655 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4656 par_num = bnx2x_check_blocks_with_parity4(bp, 4657 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4658 4659 if (print) 4660 pr_cont("\n"); 4661 4662 return true; 4663 } else 4664 return false; 4665 } 4666 4667 /** 4668 * bnx2x_chk_parity_attn - checks for parity attentions. 
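* Reads the after-invert AEU attention registers for this port (four on E1x chips, five otherwise) and reports whether any parity bit is set in them.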
4669 * 4670 * @bp: driver handle 4671 * @global: true if there was a global attention 4672 * @print: show parity attention in syslog 4673 */ 4674 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 4675 { 4676 struct attn_route attn = { {0} }; 4677 int port = BP_PORT(bp); 4678 4679 attn.sig[0] = REG_RD(bp, 4680 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 4681 port*4); 4682 attn.sig[1] = REG_RD(bp, 4683 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 4684 port*4); 4685 attn.sig[2] = REG_RD(bp, 4686 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 4687 port*4); 4688 attn.sig[3] = REG_RD(bp, 4689 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4690 port*4); 4691 4692 if (!CHIP_IS_E1x(bp)) 4693 attn.sig[4] = REG_RD(bp, 4694 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 4695 port*4); 4696 4697 return bnx2x_parity_attn(bp, global, print, attn.sig); 4698 } 4699 4700 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4701 { 4702 u32 val; 4703 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4704 4705 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 4706 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 4707 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 4708 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 4709 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 4710 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 4711 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 4712 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 4713 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 4714 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 4715 if (val & 4716 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 4717 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 4718 if (val & 4719 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 4720 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 4721 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 4722 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 4723 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 4724 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 4725 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 4726 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 4727 } 4728 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 4729 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 4730 BNX2X_ERR("ATC hw attention 0x%x\n", val); 4731 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 4732 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 4733 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 4734 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 4735 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 4736 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 4737 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 4738 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 4739 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 4740 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 4741 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 4742 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 4743 } 4744 4745 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4746 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 4747 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 4748 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4749 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 4750 } 4751 } 4752 4753 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 
deasserted) 4754 { 4755 struct attn_route attn, *group_mask; 4756 int port = BP_PORT(bp); 4757 int index; 4758 u32 reg_addr; 4759 u32 val; 4760 u32 aeu_mask; 4761 bool global = false; 4762 4763 /* need to take HW lock because MCP or other port might also 4764 try to handle this event */ 4765 bnx2x_acquire_alr(bp); 4766 4767 if (bnx2x_chk_parity_attn(bp, &global, true)) { 4768 #ifndef BNX2X_STOP_ON_ERROR 4769 bp->recovery_state = BNX2X_RECOVERY_INIT; 4770 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4771 /* Disable HW interrupts */ 4772 bnx2x_int_disable(bp); 4773 /* In case of parity errors don't handle attentions so that 4774 * other function would "see" parity errors. 4775 */ 4776 #else 4777 bnx2x_panic(); 4778 #endif 4779 bnx2x_release_alr(bp); 4780 return; 4781 } 4782 4783 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 4784 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 4785 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 4786 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 4787 if (!CHIP_IS_E1x(bp)) 4788 attn.sig[4] = 4789 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 4790 else 4791 attn.sig[4] = 0; 4792 4793 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 4794 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 4795 4796 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4797 if (deasserted & (1 << index)) { 4798 group_mask = &bp->attn_group[index]; 4799 4800 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 4801 index, 4802 group_mask->sig[0], group_mask->sig[1], 4803 group_mask->sig[2], group_mask->sig[3], 4804 group_mask->sig[4]); 4805 4806 bnx2x_attn_int_deasserted4(bp, 4807 attn.sig[4] & group_mask->sig[4]); 4808 bnx2x_attn_int_deasserted3(bp, 4809 attn.sig[3] & group_mask->sig[3]); 4810 bnx2x_attn_int_deasserted1(bp, 4811 attn.sig[1] & group_mask->sig[1]); 4812 bnx2x_attn_int_deasserted2(bp, 4813 attn.sig[2] & group_mask->sig[2]); 4814 bnx2x_attn_int_deasserted0(bp, 4815 attn.sig[0] & group_mask->sig[0]); 4816 } 4817 } 4818 4819 bnx2x_release_alr(bp); 4820 4821 if (bp->common.int_block == INT_BLOCK_HC) 4822 reg_addr = (HC_REG_COMMAND_REG + port*32 + 4823 COMMAND_REG_ATTN_BITS_CLR); 4824 else 4825 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 4826 4827 val = ~deasserted; 4828 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 4829 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 4830 REG_WR(bp, reg_addr, val); 4831 4832 if (~bp->attn_state & deasserted) 4833 BNX2X_ERR("IGU ERROR\n"); 4834 4835 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4836 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4837 4838 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4839 aeu_mask = REG_RD(bp, reg_addr); 4840 4841 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 4842 aeu_mask, deasserted); 4843 aeu_mask |= (deasserted & 0x3ff); 4844 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 4845 4846 REG_WR(bp, reg_addr, aeu_mask); 4847 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4848 4849 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 4850 bp->attn_state &= ~deasserted; 4851 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 4852 } 4853 4854 static void bnx2x_attn_int(struct bnx2x *bp) 4855 { 4856 /* read local copy of bits */ 4857 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 4858 attn_bits); 4859 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 
4860 attn_bits_ack); 4861 u32 attn_state = bp->attn_state; 4862 4863 /* look for changed bits */ 4864 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 4865 u32 deasserted = ~attn_bits & attn_ack & attn_state; 4866 4867 DP(NETIF_MSG_HW, 4868 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 4869 attn_bits, attn_ack, asserted, deasserted); 4870 4871 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 4872 BNX2X_ERR("BAD attention state\n"); 4873 4874 /* handle bits that were raised */ 4875 if (asserted) 4876 bnx2x_attn_int_asserted(bp, asserted); 4877 4878 if (deasserted) 4879 bnx2x_attn_int_deasserted(bp, deasserted); 4880 } 4881 4882 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 4883 u16 index, u8 op, u8 update) 4884 { 4885 u32 igu_addr = bp->igu_base_addr; 4886 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 4887 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 4888 igu_addr); 4889 } 4890 4891 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 4892 { 4893 /* No memory barriers */ 4894 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 4895 mmiowb(); /* keep prod updates ordered */ 4896 } 4897 4898 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4899 union event_ring_elem *elem) 4900 { 4901 u8 err = elem->message.error; 4902 4903 if (!bp->cnic_eth_dev.starting_cid || 4904 (cid < bp->cnic_eth_dev.starting_cid && 4905 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 4906 return 1; 4907 4908 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 4909 4910 if (unlikely(err)) { 4911 4912 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 4913 cid); 4914 bnx2x_panic_dump(bp, false); 4915 } 4916 bnx2x_cnic_cfc_comp(bp, cid, err); 4917 return 0; 4918 } 4919 4920 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4921 { 4922 struct bnx2x_mcast_ramrod_params rparam; 4923 int rc; 4924 4925 memset(&rparam, 0, sizeof(rparam)); 4926 4927 rparam.mcast_obj = &bp->mcast_obj; 4928 4929 netif_addr_lock_bh(bp->dev); 4930 4931 /* Clear pending state for the last command */ 4932 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 4933 4934 /* If there are pending mcast commands - send them */ 4935 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 4936 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 4937 if (rc < 0) 4938 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 4939 rc); 4940 } 4941 4942 netif_addr_unlock_bh(bp->dev); 4943 } 4944 4945 static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 4946 union event_ring_elem *elem) 4947 { 4948 unsigned long ramrod_flags = 0; 4949 int rc = 0; 4950 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4951 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 4952 4953 /* Always push next commands out, don't wait here */ 4954 __set_bit(RAMROD_CONT, &ramrod_flags); 4955 4956 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) 4957 >> BNX2X_SWCID_SHIFT) { 4958 case BNX2X_FILTER_MAC_PENDING: 4959 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4960 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) 4961 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4962 else 4963 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 4964 4965 break; 4966 case BNX2X_FILTER_MCAST_PENDING: 4967 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 4968 /* This is only relevant for 57710 where multicast MACs are 4969 * configured as unicast MACs using the same ramrod. 
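* On those chips the completion therefore arrives as a MAC classification event rather than a dedicated multicast one, which is why it is forwarded to the mcast handler below.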
4970 */ 4971 bnx2x_handle_mcast_eqe(bp); 4972 return; 4973 default: 4974 BNX2X_ERR("Unsupported classification command: %d\n", 4975 elem->message.data.eth_event.echo); 4976 return; 4977 } 4978 4979 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 4980 4981 if (rc < 0) 4982 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 4983 else if (rc > 0) 4984 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 4985 } 4986 4987 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4988 4989 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4990 { 4991 netif_addr_lock_bh(bp->dev); 4992 4993 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 4994 4995 /* Send rx_mode command again if was requested */ 4996 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 4997 bnx2x_set_storm_rx_mode(bp); 4998 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 4999 &bp->sp_state)) 5000 bnx2x_set_iscsi_eth_rx_mode(bp, true); 5001 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 5002 &bp->sp_state)) 5003 bnx2x_set_iscsi_eth_rx_mode(bp, false); 5004 5005 netif_addr_unlock_bh(bp->dev); 5006 } 5007 5008 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 5009 union event_ring_elem *elem) 5010 { 5011 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 5012 DP(BNX2X_MSG_SP, 5013 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 5014 elem->message.data.vif_list_event.func_bit_map); 5015 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 5016 elem->message.data.vif_list_event.func_bit_map); 5017 } else if (elem->message.data.vif_list_event.echo == 5018 VIF_LIST_RULE_SET) { 5019 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 5020 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 5021 } 5022 } 5023 5024 /* called with rtnl_lock */ 5025 static void bnx2x_after_function_update(struct bnx2x *bp) 5026 { 5027 int q, rc; 5028 struct bnx2x_fastpath *fp; 5029 struct bnx2x_queue_state_params queue_params = {NULL}; 5030 struct bnx2x_queue_update_params *q_update_params = 5031 &queue_params.params.update; 5032 5033 /* Send Q update command with afex vlan removal values for all Qs */ 5034 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 5035 5036 /* set silent vlan removal values according to vlan mode */ 5037 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 5038 &q_update_params->update_flags); 5039 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 5040 &q_update_params->update_flags); 5041 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5042 5043 /* in access mode mark mask and value are 0 to strip all vlans */ 5044 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { 5045 q_update_params->silent_removal_value = 0; 5046 q_update_params->silent_removal_mask = 0; 5047 } else { 5048 q_update_params->silent_removal_value = 5049 (bp->afex_def_vlan_tag & VLAN_VID_MASK); 5050 q_update_params->silent_removal_mask = VLAN_VID_MASK; 5051 } 5052 5053 for_each_eth_queue(bp, q) { 5054 /* Set the appropriate Queue object */ 5055 fp = &bp->fp[q]; 5056 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5057 5058 /* send the ramrod */ 5059 rc = bnx2x_queue_state_change(bp, &queue_params); 5060 if (rc < 0) 5061 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5062 q); 5063 } 5064 5065 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { 5066 fp = &bp->fp[FCOE_IDX(bp)]; 5067 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5068 5069 /* clear pending completion bit */ 5070 __clear_bit(RAMROD_COMP_WAIT, 
&queue_params.ramrod_flags); 5071 5072 /* mark latest Q bit */ 5073 smp_mb__before_clear_bit(); 5074 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 5075 smp_mb__after_clear_bit(); 5076 5077 /* send Q update ramrod for FCoE Q */ 5078 rc = bnx2x_queue_state_change(bp, &queue_params); 5079 if (rc < 0) 5080 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5081 q); 5082 } else { 5083 /* If no FCoE ring - ACK MCP now */ 5084 bnx2x_link_report(bp); 5085 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5086 } 5087 } 5088 5089 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 5090 struct bnx2x *bp, u32 cid) 5091 { 5092 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 5093 5094 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp))) 5095 return &bnx2x_fcoe_sp_obj(bp, q_obj); 5096 else 5097 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; 5098 } 5099 5100 static void bnx2x_eq_int(struct bnx2x *bp) 5101 { 5102 u16 hw_cons, sw_cons, sw_prod; 5103 union event_ring_elem *elem; 5104 u8 echo; 5105 u32 cid; 5106 u8 opcode; 5107 int rc, spqe_cnt = 0; 5108 struct bnx2x_queue_sp_obj *q_obj; 5109 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; 5110 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; 5111 5112 hw_cons = le16_to_cpu(*bp->eq_cons_sb); 5113 5114 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 5115 * When we get the next-page we need to adjust so the loop 5116 * condition below will be met. The next element is the size of a 5117 * regular element, hence we increment by 1 5118 */ 5119 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) 5120 hw_cons++; 5121 5122 /* This function may never run in parallel with itself for a 5123 * specific bp, thus there is no need for a "paired" read memory 5124 * barrier here. 
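* (in this driver the EQ is consumed only from the slow-path task; see bnx2x_sp_task() below)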
5125 */ 5126 sw_cons = bp->eq_cons; 5127 sw_prod = bp->eq_prod; 5128 5129 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", 5130 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); 5131 5132 for (; sw_cons != hw_cons; 5133 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 5134 5135 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; 5136 5137 rc = bnx2x_iov_eq_sp_event(bp, elem); 5138 if (!rc) { 5139 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", 5140 rc); 5141 goto next_spqe; 5142 } 5143 5144 /* elem CID originates from FW; actually LE */ 5145 cid = SW_CID((__force __le32) 5146 elem->message.data.cfc_del_event.cid); 5147 opcode = elem->message.opcode; 5148 5149 /* handle eq element */ 5150 switch (opcode) { 5151 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 5152 DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); 5153 bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); 5154 continue; 5155 5156 case EVENT_RING_OPCODE_STAT_QUERY: 5157 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, 5158 "got statistics comp event %d\n", 5159 bp->stats_comp++); 5160 /* nothing to do with stats comp */ 5161 goto next_spqe; 5162 5163 case EVENT_RING_OPCODE_CFC_DEL: 5164 /* handle according to cid range */ 5165 /* 5166 * we may want to verify here that the bp state is 5167 * HALTING 5168 */ 5169 DP(BNX2X_MSG_SP, 5170 "got delete ramrod for MULTI[%d]\n", cid); 5171 5172 if (CNIC_LOADED(bp) && 5173 !bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 5174 goto next_spqe; 5175 5176 q_obj = bnx2x_cid_to_q_obj(bp, cid); 5177 5178 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 5179 break; 5180 5181 goto next_spqe; 5182 5183 case EVENT_RING_OPCODE_STOP_TRAFFIC: 5184 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 5185 if (f_obj->complete_cmd(bp, f_obj, 5186 BNX2X_F_CMD_TX_STOP)) 5187 break; 5188 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); 5189 goto next_spqe; 5190 5191 case EVENT_RING_OPCODE_START_TRAFFIC: 5192 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 5193 if (f_obj->complete_cmd(bp, f_obj, 5194 BNX2X_F_CMD_TX_START)) 5195 break; 5196 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 5197 goto next_spqe; 5198 5199 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 5200 echo = elem->message.data.function_update_event.echo; 5201 if (echo == SWITCH_UPDATE) { 5202 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5203 "got FUNC_SWITCH_UPDATE ramrod\n"); 5204 if (f_obj->complete_cmd( 5205 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) 5206 break; 5207 5208 } else { 5209 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 5210 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 5211 f_obj->complete_cmd(bp, f_obj, 5212 BNX2X_F_CMD_AFEX_UPDATE); 5213 5214 /* We will perform the Queues update from 5215 * sp_rtnl task as all Queue SP operations 5216 * should run under rtnl_lock. 
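* Only the BNX2X_SP_RTNL_AFEX_F_UPDATE bit is set here; the sp_rtnl worker later performs the actual queue update under rtnl.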
5217 */ 5218 smp_mb__before_clear_bit(); 5219 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 5220 &bp->sp_rtnl_state); 5221 smp_mb__after_clear_bit(); 5222 5223 schedule_delayed_work(&bp->sp_rtnl_task, 0); 5224 } 5225 5226 goto next_spqe; 5227 5228 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 5229 f_obj->complete_cmd(bp, f_obj, 5230 BNX2X_F_CMD_AFEX_VIFLISTS); 5231 bnx2x_after_afex_vif_lists(bp, elem); 5232 goto next_spqe; 5233 case EVENT_RING_OPCODE_FUNCTION_START: 5234 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5235 "got FUNC_START ramrod\n"); 5236 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 5237 break; 5238 5239 goto next_spqe; 5240 5241 case EVENT_RING_OPCODE_FUNCTION_STOP: 5242 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5243 "got FUNC_STOP ramrod\n"); 5244 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 5245 break; 5246 5247 goto next_spqe; 5248 } 5249 5250 switch (opcode | bp->state) { 5251 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5252 BNX2X_STATE_OPEN): 5253 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5254 BNX2X_STATE_OPENING_WAIT4_PORT): 5255 cid = elem->message.data.eth_event.echo & 5256 BNX2X_SWCID_MASK; 5257 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", 5258 cid); 5259 rss_raw->clear_pending(rss_raw); 5260 break; 5261 5262 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 5263 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 5264 case (EVENT_RING_OPCODE_SET_MAC | 5265 BNX2X_STATE_CLOSING_WAIT4_HALT): 5266 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5267 BNX2X_STATE_OPEN): 5268 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5269 BNX2X_STATE_DIAG): 5270 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5271 BNX2X_STATE_CLOSING_WAIT4_HALT): 5272 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); 5273 bnx2x_handle_classification_eqe(bp, elem); 5274 break; 5275 5276 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5277 BNX2X_STATE_OPEN): 5278 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5279 BNX2X_STATE_DIAG): 5280 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5281 BNX2X_STATE_CLOSING_WAIT4_HALT): 5282 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); 5283 bnx2x_handle_mcast_eqe(bp); 5284 break; 5285 5286 case (EVENT_RING_OPCODE_FILTERS_RULES | 5287 BNX2X_STATE_OPEN): 5288 case (EVENT_RING_OPCODE_FILTERS_RULES | 5289 BNX2X_STATE_DIAG): 5290 case (EVENT_RING_OPCODE_FILTERS_RULES | 5291 BNX2X_STATE_CLOSING_WAIT4_HALT): 5292 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); 5293 bnx2x_handle_rx_mode_eqe(bp); 5294 break; 5295 default: 5296 /* unknown event log error and continue */ 5297 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", 5298 elem->message.opcode, bp->state); 5299 } 5300 next_spqe: 5301 spqe_cnt++; 5302 } /* for */ 5303 5304 smp_mb__before_atomic_inc(); 5305 atomic_add(spqe_cnt, &bp->eq_spq_left); 5306 5307 bp->eq_cons = sw_cons; 5308 bp->eq_prod = sw_prod; 5309 /* Make sure that above mem writes were issued towards the memory */ 5310 smp_wmb(); 5311 5312 /* update producer */ 5313 bnx2x_update_eq_prod(bp, bp->eq_prod); 5314 } 5315 5316 static void bnx2x_sp_task(struct work_struct *work) 5317 { 5318 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 5319 5320 DP(BNX2X_MSG_SP, "sp task invoked\n"); 5321 5322 /* make sure the atomic interrupt_occurred has been written */ 5323 smp_rmb(); 5324 if (atomic_read(&bp->interrupt_occurred)) { 5325 5326 /* what work needs to be performed? 
*/ 5327 u16 status = bnx2x_update_dsb_idx(bp); 5328 5329 DP(BNX2X_MSG_SP, "status %x\n", status); 5330 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); 5331 atomic_set(&bp->interrupt_occurred, 0); 5332 5333 /* HW attentions */ 5334 if (status & BNX2X_DEF_SB_ATT_IDX) { 5335 bnx2x_attn_int(bp); 5336 status &= ~BNX2X_DEF_SB_ATT_IDX; 5337 } 5338 5339 /* SP events: STAT_QUERY and others */ 5340 if (status & BNX2X_DEF_SB_IDX) { 5341 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5342 5343 if (FCOE_INIT(bp) && 5344 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5345 /* Prevent local bottom-halves from running as 5346 * we are going to change the local NAPI list. 5347 */ 5348 local_bh_disable(); 5349 napi_schedule(&bnx2x_fcoe(bp, napi)); 5350 local_bh_enable(); 5351 } 5352 5353 /* Handle EQ completions */ 5354 bnx2x_eq_int(bp); 5355 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 5356 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); 5357 5358 status &= ~BNX2X_DEF_SB_IDX; 5359 } 5360 5361 /* if status is non zero then perhaps something went wrong */ 5362 if (unlikely(status)) 5363 DP(BNX2X_MSG_SP, 5364 "got an unknown interrupt! (status 0x%x)\n", status); 5365 5366 /* ack status block only if something was actually handled */ 5367 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5368 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5369 } 5370 5371 /* must be called after the EQ processing (since eq leads to sriov 5372 * ramrod completion flows). 5373 * This flow may have been scheduled by the arrival of a ramrod 5374 * completion, or by the sriov code rescheduling itself. 5375 */ 5376 bnx2x_iov_sp_task(bp); 5377 5378 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5379 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5380 &bp->sp_state)) { 5381 bnx2x_link_report(bp); 5382 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5383 } 5384 } 5385 5386 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5387 { 5388 struct net_device *dev = dev_instance; 5389 struct bnx2x *bp = netdev_priv(dev); 5390 5391 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, 5392 IGU_INT_DISABLE, 0); 5393 5394 #ifdef BNX2X_STOP_ON_ERROR 5395 if (unlikely(bp->panic)) 5396 return IRQ_HANDLED; 5397 #endif 5398 5399 if (CNIC_LOADED(bp)) { 5400 struct cnic_ops *c_ops; 5401 5402 rcu_read_lock(); 5403 c_ops = rcu_dereference(bp->cnic_ops); 5404 if (c_ops) 5405 c_ops->cnic_handler(bp->cnic_data, NULL); 5406 rcu_read_unlock(); 5407 } 5408 5409 /* schedule sp task to perform default status block work, ack 5410 * attentions and enable interrupts. 
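* The deferred work runs in bnx2x_sp_task() above, on the driver's workqueue.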
5411 */ 5412 bnx2x_schedule_sp_task(bp); 5413 5414 return IRQ_HANDLED; 5415 } 5416 5417 /* end of slow path */ 5418 5419 void bnx2x_drv_pulse(struct bnx2x *bp) 5420 { 5421 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5422 bp->fw_drv_pulse_wr_seq); 5423 } 5424 5425 static void bnx2x_timer(unsigned long data) 5426 { 5427 struct bnx2x *bp = (struct bnx2x *) data; 5428 5429 if (!netif_running(bp->dev)) 5430 return; 5431 5432 if (IS_PF(bp) && 5433 !BP_NOMCP(bp)) { 5434 int mb_idx = BP_FW_MB_IDX(bp); 5435 u32 drv_pulse; 5436 u32 mcp_pulse; 5437 5438 ++bp->fw_drv_pulse_wr_seq; 5439 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5440 /* TBD - add SYSTEM_TIME */ 5441 drv_pulse = bp->fw_drv_pulse_wr_seq; 5442 bnx2x_drv_pulse(bp); 5443 5444 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5445 MCP_PULSE_SEQ_MASK); 5446 /* The delta between driver pulse and mcp response 5447 * should be 1 (before mcp response) or 0 (after mcp response) 5448 */ 5449 if ((drv_pulse != mcp_pulse) && 5450 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5451 /* someone lost a heartbeat... */ 5452 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 5453 drv_pulse, mcp_pulse); 5454 } 5455 } 5456 5457 if (bp->state == BNX2X_STATE_OPEN) 5458 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 5459 5460 /* sample pf vf bulletin board for new posts from pf */ 5461 if (IS_VF(bp)) 5462 bnx2x_sample_bulletin(bp); 5463 5464 mod_timer(&bp->timer, jiffies + bp->current_interval); 5465 } 5466 5467 /* end of Statistics */ 5468 5469 /* nic init */ 5470 5471 /* 5472 * nic init service functions 5473 */ 5474 5475 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5476 { 5477 u32 i; 5478 if (!(len%4) && !(addr%4)) 5479 for (i = 0; i < len; i += 4) 5480 REG_WR(bp, addr + i, fill); 5481 else 5482 for (i = 0; i < len; i++) 5483 REG_WR8(bp, addr + i, fill); 5484 } 5485 5486 /* helper: writes FP SP data to FW - data_size in dwords */ 5487 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5488 int fw_sb_id, 5489 u32 *sb_data_p, 5490 u32 data_size) 5491 { 5492 int index; 5493 for (index = 0; index < data_size; index++) 5494 REG_WR(bp, BAR_CSTRORM_INTMEM + 5495 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 5496 sizeof(u32)*index, 5497 *(sb_data_p + index)); 5498 } 5499 5500 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5501 { 5502 u32 *sb_data_p; 5503 u32 data_size = 0; 5504 struct hc_status_block_data_e2 sb_data_e2; 5505 struct hc_status_block_data_e1x sb_data_e1x; 5506 5507 /* disable the function first */ 5508 if (!CHIP_IS_E1x(bp)) { 5509 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5510 sb_data_e2.common.state = SB_DISABLED; 5511 sb_data_e2.common.p_func.vf_valid = false; 5512 sb_data_p = (u32 *)&sb_data_e2; 5513 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5514 } else { 5515 memset(&sb_data_e1x, 0, 5516 sizeof(struct hc_status_block_data_e1x)); 5517 sb_data_e1x.common.state = SB_DISABLED; 5518 sb_data_e1x.common.p_func.vf_valid = false; 5519 sb_data_p = (u32 *)&sb_data_e1x; 5520 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5521 } 5522 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 5523 5524 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5525 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, 5526 CSTORM_STATUS_BLOCK_SIZE); 5527 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5528 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, 5529 CSTORM_SYNC_BLOCK_SIZE); 5530 } 5531 5532 /* helper: writes SP SB data to FW */ 5533 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5534 
struct hc_sp_status_block_data *sp_sb_data) 5535 { 5536 int func = BP_FUNC(bp); 5537 int i; 5538 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 5539 REG_WR(bp, BAR_CSTRORM_INTMEM + 5540 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 5541 i*sizeof(u32), 5542 *((u32 *)sp_sb_data + i)); 5543 } 5544 5545 static void bnx2x_zero_sp_sb(struct bnx2x *bp) 5546 { 5547 int func = BP_FUNC(bp); 5548 struct hc_sp_status_block_data sp_sb_data; 5549 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5550 5551 sp_sb_data.state = SB_DISABLED; 5552 sp_sb_data.p_func.vf_valid = false; 5553 5554 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 5555 5556 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5557 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, 5558 CSTORM_SP_STATUS_BLOCK_SIZE); 5559 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5560 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, 5561 CSTORM_SP_SYNC_BLOCK_SIZE); 5562 } 5563 5564 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 5565 int igu_sb_id, int igu_seg_id) 5566 { 5567 hc_sm->igu_sb_id = igu_sb_id; 5568 hc_sm->igu_seg_id = igu_seg_id; 5569 hc_sm->timer_value = 0xFF; 5570 hc_sm->time_to_expire = 0xFFFFFFFF; 5571 } 5572 5573 /* allocates state machine ids. */ 5574 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 5575 { 5576 /* zero out state machine indices */ 5577 /* rx indices */ 5578 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5579 5580 /* tx indices */ 5581 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5582 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 5583 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 5584 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 5585 5586 /* map indices */ 5587 /* rx indices */ 5588 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 5589 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5590 5591 /* tx indices */ 5592 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 5593 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5594 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 5595 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5596 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 5597 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5598 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 5599 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5600 } 5601 5602 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 5603 u8 vf_valid, int fw_sb_id, int igu_sb_id) 5604 { 5605 int igu_seg_id; 5606 5607 struct hc_status_block_data_e2 sb_data_e2; 5608 struct hc_status_block_data_e1x sb_data_e1x; 5609 struct hc_status_block_sm *hc_sm_p; 5610 int data_size; 5611 u32 *sb_data_p; 5612 5613 if (CHIP_INT_MODE_IS_BC(bp)) 5614 igu_seg_id = HC_SEG_ACCESS_NORM; 5615 else 5616 igu_seg_id = IGU_SEG_ACCESS_NORM; 5617 5618 bnx2x_zero_fp_sb(bp, fw_sb_id); 5619 5620 if (!CHIP_IS_E1x(bp)) { 5621 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5622 sb_data_e2.common.state = SB_ENABLED; 5623 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); 5624 sb_data_e2.common.p_func.vf_id = vfid; 5625 sb_data_e2.common.p_func.vf_valid = vf_valid; 5626 sb_data_e2.common.p_func.vnic_id = BP_VN(bp); 5627 sb_data_e2.common.same_igu_sb_1b = true; 5628 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); 5629 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); 5630 hc_sm_p = sb_data_e2.common.state_machine; 5631 sb_data_p = (u32 *)&sb_data_e2; 5632 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5633 
		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_ENABLED;
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW - PCI guarantees endianness of regpairs */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
				       tx_usec);
}

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset, reg_offset_en5;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[3]; sig[4] is set up below */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
				REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (!CHIP_IS_E1x(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset_en5 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ?
			      HC_REG_ATTN_MSG1_ADDR_L :
			      HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	/* PCI guarantees endianness of regpairs */
	sp_sb_data.state = SB_ENABLED;
	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->tx_ticks, bp->rx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
	/* we want a warning message before the EQ credit is actually exhausted...
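	 * the credit is set to the smaller of the free SPQ room
	 * (MAX_SP_DESC_CNT - MAX_SPQ_PENDING) and the EQ size itself,
	 * minus one element of slack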
*/ 5799 atomic_set(&bp->eq_spq_left, 5800 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); 5801 } 5802 5803 /* called with netif_addr_lock_bh() */ 5804 int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, 5805 unsigned long rx_mode_flags, 5806 unsigned long rx_accept_flags, 5807 unsigned long tx_accept_flags, 5808 unsigned long ramrod_flags) 5809 { 5810 struct bnx2x_rx_mode_ramrod_params ramrod_param; 5811 int rc; 5812 5813 memset(&ramrod_param, 0, sizeof(ramrod_param)); 5814 5815 /* Prepare ramrod parameters */ 5816 ramrod_param.cid = 0; 5817 ramrod_param.cl_id = cl_id; 5818 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; 5819 ramrod_param.func_id = BP_FUNC(bp); 5820 5821 ramrod_param.pstate = &bp->sp_state; 5822 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; 5823 5824 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); 5825 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); 5826 5827 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 5828 5829 ramrod_param.ramrod_flags = ramrod_flags; 5830 ramrod_param.rx_mode_flags = rx_mode_flags; 5831 5832 ramrod_param.rx_accept_flags = rx_accept_flags; 5833 ramrod_param.tx_accept_flags = tx_accept_flags; 5834 5835 rc = bnx2x_config_rx_mode(bp, &ramrod_param); 5836 if (rc < 0) { 5837 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); 5838 return rc; 5839 } 5840 5841 return 0; 5842 } 5843 5844 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, 5845 unsigned long *rx_accept_flags, 5846 unsigned long *tx_accept_flags) 5847 { 5848 /* Clear the flags first */ 5849 *rx_accept_flags = 0; 5850 *tx_accept_flags = 0; 5851 5852 switch (rx_mode) { 5853 case BNX2X_RX_MODE_NONE: 5854 /* 5855 * 'drop all' supersedes any accept flags that may have been 5856 * passed to the function. 5857 */ 5858 break; 5859 case BNX2X_RX_MODE_NORMAL: 5860 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 5861 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags); 5862 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 5863 5864 /* internal switching mode */ 5865 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 5866 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); 5867 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 5868 5869 break; 5870 case BNX2X_RX_MODE_ALLMULTI: 5871 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 5872 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 5873 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 5874 5875 /* internal switching mode */ 5876 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 5877 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 5878 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 5879 5880 break; 5881 case BNX2X_RX_MODE_PROMISC: 5882 /* According to definition of SI mode, iface in promisc mode 5883 * should receive matched and unmatched (in resolution of port) 5884 * unicast packets. 
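	 * (the unmatched ones are accepted via BNX2X_ACCEPT_UNMATCHED,
	 * set just below)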
5885 */ 5886 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags); 5887 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 5888 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 5889 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 5890 5891 /* internal switching mode */ 5892 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 5893 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 5894 5895 if (IS_MF_SI(bp)) 5896 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags); 5897 else 5898 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 5899 5900 break; 5901 default: 5902 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); 5903 return -EINVAL; 5904 } 5905 5906 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 5907 if (bp->rx_mode != BNX2X_RX_MODE_NONE) { 5908 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 5909 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 5910 } 5911 5912 return 0; 5913 } 5914 5915 /* called with netif_addr_lock_bh() */ 5916 int bnx2x_set_storm_rx_mode(struct bnx2x *bp) 5917 { 5918 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 5919 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 5920 int rc; 5921 5922 if (!NO_FCOE(bp)) 5923 /* Configure rx_mode of FCoE Queue */ 5924 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); 5925 5926 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, 5927 &tx_accept_flags); 5928 if (rc) 5929 return rc; 5930 5931 __set_bit(RAMROD_RX, &ramrod_flags); 5932 __set_bit(RAMROD_TX, &ramrod_flags); 5933 5934 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, 5935 rx_accept_flags, tx_accept_flags, 5936 ramrod_flags); 5937 } 5938 5939 static void bnx2x_init_internal_common(struct bnx2x *bp) 5940 { 5941 int i; 5942 5943 if (IS_MF_SI(bp)) 5944 /* 5945 * In switch independent mode, the TSTORM needs to accept 5946 * packets that failed classification, since approximate match 5947 * mac addresses aren't written to NIG LLH 5948 */ 5949 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5950 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); 5951 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ 5952 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5953 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); 5954 5955 /* Zero this manually as its initialization is 5956 currently missing in the initTool */ 5957 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 5958 REG_WR(bp, BAR_USTRORM_INTMEM + 5959 USTORM_AGG_DATA_OFFSET + i * 4, 0); 5960 if (!CHIP_IS_E1x(bp)) { 5961 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, 5962 CHIP_INT_MODE_IS_BC(bp) ? 
5963 HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 5964 } 5965 } 5966 5967 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 5968 { 5969 switch (load_code) { 5970 case FW_MSG_CODE_DRV_LOAD_COMMON: 5971 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5972 bnx2x_init_internal_common(bp); 5973 /* no break */ 5974 5975 case FW_MSG_CODE_DRV_LOAD_PORT: 5976 /* nothing to do */ 5977 /* no break */ 5978 5979 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5980 /* internal memory per function is 5981 initialized inside bnx2x_pf_init */ 5982 break; 5983 5984 default: 5985 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); 5986 break; 5987 } 5988 } 5989 5990 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 5991 { 5992 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); 5993 } 5994 5995 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 5996 { 5997 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); 5998 } 5999 6000 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 6001 { 6002 if (CHIP_IS_E1x(fp->bp)) 6003 return BP_L_ID(fp->bp) + fp->index; 6004 else /* We want Client ID to be the same as IGU SB ID for 57712 */ 6005 return bnx2x_fp_igu_sb_id(fp); 6006 } 6007 6008 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) 6009 { 6010 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; 6011 u8 cos; 6012 unsigned long q_type = 0; 6013 u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; 6014 fp->rx_queue = fp_idx; 6015 fp->cid = fp_idx; 6016 fp->cl_id = bnx2x_fp_cl_id(fp); 6017 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); 6018 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); 6019 /* qZone id equals to FW (per path) client id */ 6020 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); 6021 6022 /* init shortcut */ 6023 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); 6024 6025 /* Setup SB indices */ 6026 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 6027 6028 /* Configure Queue State object */ 6029 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 6030 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 6031 6032 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); 6033 6034 /* init tx data */ 6035 for_each_cos_in_tx_queue(fp, cos) { 6036 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], 6037 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), 6038 FP_COS_TO_TXQ(fp, cos, bp), 6039 BNX2X_TX_SB_INDEX_BASE + cos, fp); 6040 cids[cos] = fp->txdata_ptr[cos]->cid; 6041 } 6042 6043 /* nothing more for vf to do here */ 6044 if (IS_VF(bp)) 6045 return; 6046 6047 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, 6048 fp->fw_sb_id, fp->igu_sb_id); 6049 bnx2x_update_fpsb_idx(fp); 6050 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, 6051 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 6052 bnx2x_sp_mapping(bp, q_rdata), q_type); 6053 6054 /** 6055 * Configure classification DBs: Always enable Tx switching 6056 */ 6057 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); 6058 6059 DP(NETIF_MSG_IFUP, 6060 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", 6061 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 6062 fp->igu_sb_id); 6063 } 6064 6065 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) 6066 { 6067 int i; 6068 6069 for (i = 1; i <= NUM_TX_RINGS; i++) { 6070 struct eth_tx_next_bd *tx_next_bd = 6071 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; 6072 6073 tx_next_bd->addr_hi = 6074 cpu_to_le32(U64_HI(txdata->tx_desc_mapping + 6075 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 6076 tx_next_bd->addr_lo = 6077 cpu_to_le32(U64_LO(txdata->tx_desc_mapping + 6078 BCM_PAGE_SIZE*(i % 
NUM_TX_RINGS))); 6079 } 6080 6081 *txdata->tx_cons_sb = cpu_to_le16(0); 6082 6083 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 6084 txdata->tx_db.data.zero_fill1 = 0; 6085 txdata->tx_db.data.prod = 0; 6086 6087 txdata->tx_pkt_prod = 0; 6088 txdata->tx_pkt_cons = 0; 6089 txdata->tx_bd_prod = 0; 6090 txdata->tx_bd_cons = 0; 6091 txdata->tx_pkt = 0; 6092 } 6093 6094 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) 6095 { 6096 int i; 6097 6098 for_each_tx_queue_cnic(bp, i) 6099 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); 6100 } 6101 6102 static void bnx2x_init_tx_rings(struct bnx2x *bp) 6103 { 6104 int i; 6105 u8 cos; 6106 6107 for_each_eth_queue(bp, i) 6108 for_each_cos_in_tx_queue(&bp->fp[i], cos) 6109 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 6110 } 6111 6112 void bnx2x_nic_init_cnic(struct bnx2x *bp) 6113 { 6114 if (!NO_FCOE(bp)) 6115 bnx2x_init_fcoe_fp(bp); 6116 6117 bnx2x_init_sb(bp, bp->cnic_sb_mapping, 6118 BNX2X_VF_ID_INVALID, false, 6119 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 6120 6121 /* ensure status block indices were read */ 6122 rmb(); 6123 bnx2x_init_rx_rings_cnic(bp); 6124 bnx2x_init_tx_rings_cnic(bp); 6125 6126 /* flush all */ 6127 mb(); 6128 mmiowb(); 6129 } 6130 6131 void bnx2x_pre_irq_nic_init(struct bnx2x *bp) 6132 { 6133 int i; 6134 6135 /* Setup NIC internals and enable interrupts */ 6136 for_each_eth_queue(bp, i) 6137 bnx2x_init_eth_fp(bp, i); 6138 6139 /* ensure status block indices were read */ 6140 rmb(); 6141 bnx2x_init_rx_rings(bp); 6142 bnx2x_init_tx_rings(bp); 6143 6144 if (IS_PF(bp)) { 6145 /* Initialize MOD_ABS interrupts */ 6146 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 6147 bp->common.shmem_base, 6148 bp->common.shmem2_base, BP_PORT(bp)); 6149 6150 /* initialize the default status block and sp ring */ 6151 bnx2x_init_def_sb(bp); 6152 bnx2x_update_dsb_idx(bp); 6153 bnx2x_init_sp_ring(bp); 6154 } else { 6155 bnx2x_memset_stats(bp); 6156 } 6157 } 6158 6159 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code) 6160 { 6161 bnx2x_init_eq_ring(bp); 6162 bnx2x_init_internal(bp, load_code); 6163 bnx2x_pf_init(bp); 6164 bnx2x_stats_init(bp); 6165 6166 /* flush all before enabling interrupts */ 6167 mb(); 6168 mmiowb(); 6169 6170 bnx2x_int_enable(bp); 6171 6172 /* Check for SPIO5 */ 6173 bnx2x_attn_int_deasserted0(bp, 6174 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & 6175 AEU_INPUTS_ATTN_BITS_SPIO5); 6176 } 6177 6178 /* gzip service functions */ 6179 static int bnx2x_gunzip_init(struct bnx2x *bp) 6180 { 6181 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, 6182 &bp->gunzip_mapping, GFP_KERNEL); 6183 if (bp->gunzip_buf == NULL) 6184 goto gunzip_nomem1; 6185 6186 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); 6187 if (bp->strm == NULL) 6188 goto gunzip_nomem2; 6189 6190 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); 6191 if (bp->strm->workspace == NULL) 6192 goto gunzip_nomem3; 6193 6194 return 0; 6195 6196 gunzip_nomem3: 6197 kfree(bp->strm); 6198 bp->strm = NULL; 6199 6200 gunzip_nomem2: 6201 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6202 bp->gunzip_mapping); 6203 bp->gunzip_buf = NULL; 6204 6205 gunzip_nomem1: 6206 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); 6207 return -ENOMEM; 6208 } 6209 6210 static void bnx2x_gunzip_end(struct bnx2x *bp) 6211 { 6212 if (bp->strm) { 6213 vfree(bp->strm->workspace); 6214 kfree(bp->strm); 6215 bp->strm = NULL; 6216 } 6217 6218 if (bp->gunzip_buf) { 6219 
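		/* release the DMA-coherent decompression buffer allocated in
		 * bnx2x_gunzip_init()
		 */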
dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6220 bp->gunzip_mapping); 6221 bp->gunzip_buf = NULL; 6222 } 6223 } 6224 6225 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) 6226 { 6227 int n, rc; 6228 6229 /* check gzip header */ 6230 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { 6231 BNX2X_ERR("Bad gzip header\n"); 6232 return -EINVAL; 6233 } 6234 6235 n = 10; 6236 6237 #define FNAME 0x8 6238 6239 if (zbuf[3] & FNAME) 6240 while ((zbuf[n++] != 0) && (n < len)); 6241 6242 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; 6243 bp->strm->avail_in = len - n; 6244 bp->strm->next_out = bp->gunzip_buf; 6245 bp->strm->avail_out = FW_BUF_SIZE; 6246 6247 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); 6248 if (rc != Z_OK) 6249 return rc; 6250 6251 rc = zlib_inflate(bp->strm, Z_FINISH); 6252 if ((rc != Z_OK) && (rc != Z_STREAM_END)) 6253 netdev_err(bp->dev, "Firmware decompression error: %s\n", 6254 bp->strm->msg); 6255 6256 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6257 if (bp->gunzip_outlen & 0x3) 6258 netdev_err(bp->dev, 6259 "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6260 bp->gunzip_outlen); 6261 bp->gunzip_outlen >>= 2; 6262 6263 zlib_inflateEnd(bp->strm); 6264 6265 if (rc == Z_STREAM_END) 6266 return 0; 6267 6268 return rc; 6269 } 6270 6271 /* nic load/unload */ 6272 6273 /* 6274 * General service functions 6275 */ 6276 6277 /* send a NIG loopback debug packet */ 6278 static void bnx2x_lb_pckt(struct bnx2x *bp) 6279 { 6280 u32 wb_write[3]; 6281 6282 /* Ethernet source and destination addresses */ 6283 wb_write[0] = 0x55555555; 6284 wb_write[1] = 0x55555555; 6285 wb_write[2] = 0x20; /* SOP */ 6286 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6287 6288 /* NON-IP protocol */ 6289 wb_write[0] = 0x09000000; 6290 wb_write[1] = 0x55555555; 6291 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 6292 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6293 } 6294 6295 /* some of the internal memories 6296 * are not directly readable from the driver 6297 * to test them we send debug packets 6298 */ 6299 static int bnx2x_int_mem_test(struct bnx2x *bp) 6300 { 6301 int factor; 6302 int count, i; 6303 u32 val = 0; 6304 6305 if (CHIP_REV_IS_FPGA(bp)) 6306 factor = 120; 6307 else if (CHIP_REV_IS_EMUL(bp)) 6308 factor = 200; 6309 else 6310 factor = 1; 6311 6312 /* Disable inputs of parser neighbor blocks */ 6313 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6314 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6315 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6316 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6317 6318 /* Write 0 to parser credits for CFC search request */ 6319 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6320 6321 /* send Ethernet packet */ 6322 bnx2x_lb_pckt(bp); 6323 6324 /* TODO do i reset NIG statistic? 
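	 * (the first wait loop below assumes the BRB octet counter reads
	 * zero right after the blocks come out of reset)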
 */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6445 { 6446 u32 val; 6447 6448 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6449 if (!CHIP_IS_E1x(bp)) 6450 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6451 else 6452 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 6453 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6454 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6455 /* 6456 * mask read length error interrupts in brb for parser 6457 * (parsing unit and 'checksum and crc' unit) 6458 * these errors are legal (PU reads fixed length and CAC can cause 6459 * read length error on truncated packets) 6460 */ 6461 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); 6462 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 6463 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 6464 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 6465 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); 6466 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); 6467 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ 6468 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ 6469 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); 6470 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); 6471 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); 6472 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ 6473 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ 6474 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 6475 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); 6476 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); 6477 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 6478 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6479 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6480 6481 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 6482 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 6483 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN; 6484 if (!CHIP_IS_E1x(bp)) 6485 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 6486 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED; 6487 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); 6488 6489 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6490 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6491 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6492 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ 6493 6494 if (!CHIP_IS_E1x(bp)) 6495 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 6496 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 6497 6498 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 6499 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 6500 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 6501 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 6502 } 6503 6504 static void bnx2x_reset_common(struct bnx2x *bp) 6505 { 6506 u32 val = 0x1400; 6507 6508 /* reset_common */ 6509 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6510 0xd3ffff7f); 6511 6512 if (CHIP_IS_E3(bp)) { 6513 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6514 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6515 } 6516 6517 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); 6518 } 6519 6520 static void bnx2x_setup_dmae(struct bnx2x *bp) 6521 { 6522 bp->dmae_ready = 0; 6523 spin_lock_init(&bp->dmae_lock); 6524 } 6525 6526 static void bnx2x_init_pxp(struct bnx2x *bp) 6527 { 6528 u16 devctl; 6529 int r_order, w_order; 6530 6531 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); 6532 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6533 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6534 if (bp->mrrs == -1) 6535 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); 6536 else { 6537 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); 6538 r_order = bp->mrrs; 6539 } 6540 6541 bnx2x_init_pxp_arb(bp, r_order, w_order); 6542 } 6543 6544 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6545 { 6546 int is_required; 6547 u32 val; 6548 int port; 6549 6550 if (BP_NOMCP(bp)) 
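		/* no management FW - nothing to read from shared memory */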
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= MISC_SPIO_SPIO5;
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static void bnx2x__common_init_phy(struct bnx2x *bp)
{
	u32 shmem_base[2], shmem2_base[2];
	/* Avoid common init in case MFW supports LFA */
	if (SHMEM2_RD(bp, size) >
	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
		return;
	shmem_base[0] = bp->common.shmem_base;
	shmem2_base[0] = bp->common.shmem2_base;
	if (!CHIP_IS_E1x(bp)) {
		shmem_base[1] =
			SHMEM2_RD(bp, other_shmem_base_addr);
		shmem2_base[1] =
			SHMEM2_RD(bp, other_shmem2_base_addr);
	}
	bnx2x_acquire_phy_lock(bp);
	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
			      bp->common.chip_id);
	bnx2x_release_phy_lock(bp);
}

/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp: driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));

	/*
	 * take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/*
		 * In 4-port or 2-port mode we need to turn off master-enable
		 * for everyone; after that, turn it back on for self.
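		 * (master-enable gates the function's ability to issue DMAE
		 * operations; see the timers-bug notes further down)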
6662 * so, we disregard multi-function or not, and always disable 6663 * for all functions on the given path, this means 0,2,4,6 for 6664 * path 0 and 1,3,5,7 for path 1 6665 */ 6666 for (abs_func_id = BP_PATH(bp); 6667 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { 6668 if (abs_func_id == BP_ABS_FUNC(bp)) { 6669 REG_WR(bp, 6670 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 6671 1); 6672 continue; 6673 } 6674 6675 bnx2x_pretend_func(bp, abs_func_id); 6676 /* clear pf enable */ 6677 bnx2x_pf_disable(bp); 6678 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 6679 } 6680 } 6681 6682 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); 6683 if (CHIP_IS_E1(bp)) { 6684 /* enable HW interrupt from PXP on USDM overflow 6685 bit 16 on INT_MASK_0 */ 6686 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6687 } 6688 6689 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); 6690 bnx2x_init_pxp(bp); 6691 6692 #ifdef __BIG_ENDIAN 6693 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); 6694 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); 6695 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 6696 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 6697 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 6698 /* make sure this value is 0 */ 6699 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); 6700 6701 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ 6702 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); 6703 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); 6704 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); 6705 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 6706 #endif 6707 6708 bnx2x_ilt_init_page_size(bp, INITOP_SET); 6709 6710 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 6711 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 6712 6713 /* let the HW do it's magic ... */ 6714 msleep(100); 6715 /* finish PXP init */ 6716 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); 6717 if (val != 1) { 6718 BNX2X_ERR("PXP2 CFG failed\n"); 6719 return -EBUSY; 6720 } 6721 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); 6722 if (val != 1) { 6723 BNX2X_ERR("PXP2 RD_INIT failed\n"); 6724 return -EBUSY; 6725 } 6726 6727 /* Timers bug workaround E2 only. We need to set the entire ILT to 6728 * have entries with value "0" and valid bit on. 6729 * This needs to be done by the first PF that is loaded in a path 6730 * (i.e. common phase) 6731 */ 6732 if (!CHIP_IS_E1x(bp)) { 6733 /* In E2 there is a bug in the timers block that can cause function 6 / 7 6734 * (i.e. vnic3) to start even if it is marked as "scan-off". 6735 * This occurs when a different function (func2,3) is being marked 6736 * as "scan-off". Real-life scenario for example: if a driver is being 6737 * load-unloaded while func6,7 are down. This will cause the timer to access 6738 * the ilt, translate to a logical address and send a request to read/write. 6739 * Since the ilt for the function that is down is not valid, this will cause 6740 * a translation error which is unrecoverable. 6741 * The Workaround is intended to make sure that when this happens nothing fatal 6742 * will occur. The workaround: 6743 * 1. First PF driver which loads on a path will: 6744 * a. After taking the chip out of reset, by using pretend, 6745 * it will write "0" to the following registers of 6746 * the other vnics. 6747 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6748 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 6749 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 6750 * And for itself it will write '1' to 6751 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 6752 * dmae-operations (writing to pram for example.) 6753 * note: can be done for only function 6,7 but cleaner this 6754 * way. 6755 * b. 
Write zero+valid to the entire ILT.
	 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
	 *          VNIC3 (of that port). The range allocated will be the
	 *          entire ILT. This is needed to prevent ILT range error.
	 *   2.  Any PF driver load flow:
	 *      a.  ILT update with the physical addresses of the allocated
	 *          logical pages.
	 *      b.  Wait 20msec. - note that this timeout is needed to make
	 *          sure there are no requests in one of the PXP internal
	 *          queues with "old" ILT addresses.
	 *      c.  PF enable in the PGLC.
	 *      d.  Clear the was_error of the PF in the PGLC. (could have
	 *          occurred while driver was down)
	 *      e.  PF enable in the CFC (WEAK + STRONG)
	 *      f.  Timers scan enable
	 *   3.  PF driver unload flow:
	 *      a.  Clear the Timers scan_en.
	 *      b.  Polling for scan_on=0 for that PF.
	 *      c.  Clear the PF enable bit in the PXP.
	 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
	 *      e.  Write zero+valid to all ILT entries (The valid bit must
	 *          stay set)
	 *      f.  If this is VNIC 3 of a port then also init
	 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
	 *          to the last entry in the ILT.
	 *
	 *      Notes:
	 *      Currently the PF error in the PGLC is non recoverable.
	 *      In the future there will be a recovery routine for this error.
	 *      Currently attention is masked.
	 *      Having an MCP lock on the load/unload process does not
	 *      guarantee that there is no Timer disable during Func6/7 enable.
	 *      This is because the Timers scan is currently being cleared by
	 *      the MCP on FLR.
	 *      Step 2.d can be done only for PF6/7 and the driver can also
	 *      check if there is error before clearing it. But the flow above
	 *      is simpler and more general.
	 *      All ILT entries are written by zero+valid and not just PF6/7
	 *      ILT entries since in the future the ILT entries allocation for
	 *      PF-s might be dynamic.
	 */
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its sibling are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do its magic ...
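		 * (poll ATC_REG_ATC_INIT_DONE below; emulation and FPGA runs
		 * need far longer, hence the scaled factor above)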
*/ 6836 do { 6837 msleep(200); 6838 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); 6839 } while (factor-- && (val != 1)); 6840 6841 if (val != 1) { 6842 BNX2X_ERR("ATC_INIT failed\n"); 6843 return -EBUSY; 6844 } 6845 } 6846 6847 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); 6848 6849 bnx2x_iov_init_dmae(bp); 6850 6851 /* clean the DMAE memory */ 6852 bp->dmae_ready = 1; 6853 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); 6854 6855 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); 6856 6857 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); 6858 6859 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); 6860 6861 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); 6862 6863 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); 6864 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); 6865 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); 6866 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 6867 6868 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); 6869 6870 /* QM queues pointers table */ 6871 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 6872 6873 /* soft reset pulse */ 6874 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6875 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6876 6877 if (CNIC_SUPPORT(bp)) 6878 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 6879 6880 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 6881 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); 6882 if (!CHIP_REV_IS_SLOW(bp)) 6883 /* enable hw interrupt from doorbell Q */ 6884 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6885 6886 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6887 6888 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6889 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 6890 6891 if (!CHIP_IS_E1(bp)) 6892 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 6893 6894 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { 6895 if (IS_MF_AFEX(bp)) { 6896 /* configure that VNTag and VLAN headers must be 6897 * received in afex mode 6898 */ 6899 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); 6900 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); 6901 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 6902 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 6903 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); 6904 } else { 6905 /* Bit-map indicating which L2 hdrs may appear 6906 * after the basic Ethernet header 6907 */ 6908 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 6909 bp->path_has_ovlan ? 
7 : 6); 6910 } 6911 } 6912 6913 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 6914 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 6915 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); 6916 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); 6917 6918 if (!CHIP_IS_E1x(bp)) { 6919 /* reset VFC memories */ 6920 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 6921 VFC_MEMORIES_RST_REG_CAM_RST | 6922 VFC_MEMORIES_RST_REG_RAM_RST); 6923 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 6924 VFC_MEMORIES_RST_REG_CAM_RST | 6925 VFC_MEMORIES_RST_REG_RAM_RST); 6926 6927 msleep(20); 6928 } 6929 6930 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); 6931 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); 6932 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); 6933 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); 6934 6935 /* sync semi rtc */ 6936 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6937 0x80000000); 6938 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 6939 0x80000000); 6940 6941 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); 6942 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 6943 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 6944 6945 if (!CHIP_IS_E1x(bp)) { 6946 if (IS_MF_AFEX(bp)) { 6947 /* configure that VNTag and VLAN headers must be 6948 * sent in afex mode 6949 */ 6950 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); 6951 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); 6952 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 6953 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 6954 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); 6955 } else { 6956 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 6957 bp->path_has_ovlan ? 7 : 6); 6958 } 6959 } 6960 6961 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6962 6963 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 6964 6965 if (CNIC_SUPPORT(bp)) { 6966 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6967 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 6968 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 6969 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 6970 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 6971 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 6972 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 6973 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 6974 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 6975 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 6976 } 6977 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6978 6979 if (sizeof(union cdu_context) != 1024) 6980 /* we currently assume that a context is 1024 bytes */ 6981 dev_alert(&bp->pdev->dev, 6982 "please adjust the size of cdu_context(%ld)\n", 6983 (long)sizeof(union cdu_context)); 6984 6985 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); 6986 val = (4 << 24) + (0 << 12) + 1024; 6987 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 6988 6989 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); 6990 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 6991 /* enable context validation interrupt from CFC */ 6992 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6993 6994 /* set the thresholds to prevent CFC/CDU race */ 6995 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 6996 6997 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); 6998 6999 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) 7000 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); 7001 7002 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); 7003 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); 7004 7005 /* Reset PCIE errors for debug */ 7006 REG_WR(bp, 0x2814, 0xffffffff); 7007 REG_WR(bp, 0x3820, 0xffffffff); 7008 7009 if (!CHIP_IS_E1x(bp)) { 7010 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 7011 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 7012 
			PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			(PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
			PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
			PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			(PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
			PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
			PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
	if (!CHIP_IS_E1(bp)) {
		/* in E3 this is done in the per-port section */
		if (!CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		if (CHIP_IS_E1x(bp))
			bnx2x__common_init_phy(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp: driver handle
 */
static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
{
	int rc = bnx2x_init_hw_common(bp);

	if (rc)
		return rc;

	/* In E2 2-PORT mode, same ext phy is used for the two paths */
	if (!BP_NOMCP(bp))
		bnx2x__common_init_phy(bp);

	return 0;
}

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
	u32 low, high;
	u32 val;

	DP(NETIF_MSG_HW, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	/* Timers bug workaround: the pf_master bit in pglue is disabled at
	 * the common phase, so we need to enable it here before any DMAE
	 * accesses are attempted.
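	 * (the matching 20 msec ILT settle delay for this erratum is applied
	 * in the function phase; see bnx2x_init_hw_func())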
Therefore we manually added the enable-master to the 7121 * port phase (it also happens in the function phase) 7122 */ 7123 if (!CHIP_IS_E1x(bp)) 7124 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7125 7126 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7127 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7128 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7129 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7130 7131 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7132 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7133 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7134 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7135 7136 /* QM cid (connection) count */ 7137 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 7138 7139 if (CNIC_SUPPORT(bp)) { 7140 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7141 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 7142 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 7143 } 7144 7145 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7146 7147 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7148 7149 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 7150 7151 if (IS_MF(bp)) 7152 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 7153 else if (bp->dev->mtu > 4096) { 7154 if (bp->flags & ONE_PORT_FLAG) 7155 low = 160; 7156 else { 7157 val = bp->dev->mtu; 7158 /* (24*1024 + val*4)/256 */ 7159 low = 96 + (val/64) + 7160 ((val % 64) ? 1 : 0); 7161 } 7162 } else 7163 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 7164 high = low + 56; /* 14*1024/256 */ 7165 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 7166 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 7167 } 7168 7169 if (CHIP_MODE_IS_4_PORT(bp)) 7170 REG_WR(bp, (BP_PORT(bp) ? 7171 BRB1_REG_MAC_GUARANTIED_1 : 7172 BRB1_REG_MAC_GUARANTIED_0), 40); 7173 7174 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7175 if (CHIP_IS_E3B0(bp)) { 7176 if (IS_MF_AFEX(bp)) { 7177 /* configure headers for AFEX mode */ 7178 REG_WR(bp, BP_PORT(bp) ? 7179 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7180 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 7181 REG_WR(bp, BP_PORT(bp) ? 7182 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 7183 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 7184 REG_WR(bp, BP_PORT(bp) ? 7185 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 7186 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 7187 } else { 7188 /* Ovlan exists only if we are in multi-function + 7189 * switch-dependent mode, in switch-independent there 7190 * is no ovlan headers 7191 */ 7192 REG_WR(bp, BP_PORT(bp) ? 7193 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7194 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 7195 (bp->path_has_ovlan ? 
7 : 6)); 7196 } 7197 } 7198 7199 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7200 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7201 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7202 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7203 7204 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7205 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7206 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7207 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7208 7209 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7210 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7211 7212 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7213 7214 if (CHIP_IS_E1x(bp)) { 7215 /* configure PBF to work without PAUSE mtu 9000 */ 7216 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 7217 7218 /* update threshold */ 7219 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 7220 /* update init credit */ 7221 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 7222 7223 /* probe changes */ 7224 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 7225 udelay(50); 7226 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 7227 } 7228 7229 if (CNIC_SUPPORT(bp)) 7230 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7231 7232 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7233 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7234 7235 if (CHIP_IS_E1(bp)) { 7236 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7237 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7238 } 7239 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7240 7241 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7242 7243 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7244 /* init aeu_mask_attn_func_0/1: 7245 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use 7246 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF 7247 * bits 4-7 are used for "per vn group attention" */ 7248 val = IS_MF(bp) ? 0xF7 : 0x7; 7249 /* Enable DCBX attention for all but E1 */ 7250 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7251 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7252 7253 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7254 7255 if (!CHIP_IS_E1x(bp)) { 7256 /* Bit-map indicating which L2 hdrs may appear after the 7257 * basic Ethernet header 7258 */ 7259 if (IS_MF_AFEX(bp)) 7260 REG_WR(bp, BP_PORT(bp) ? 7261 NIG_REG_P1_HDRS_AFTER_BASIC : 7262 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 7263 else 7264 REG_WR(bp, BP_PORT(bp) ? 7265 NIG_REG_P1_HDRS_AFTER_BASIC : 7266 NIG_REG_P0_HDRS_AFTER_BASIC, 7267 IS_MF_SD(bp) ? 7 : 6); 7268 7269 if (CHIP_IS_E3(bp)) 7270 REG_WR(bp, BP_PORT(bp) ? 7271 NIG_REG_LLH1_MF_MODE : 7272 NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7273 } 7274 if (!CHIP_IS_E3(bp)) 7275 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 7276 7277 if (!CHIP_IS_E1(bp)) { 7278 /* 0x2 disable mf_ov, 0x1 enable */ 7279 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 7280 (IS_MF_SD(bp) ? 0x1 : 0x2)); 7281 7282 if (!CHIP_IS_E1x(bp)) { 7283 val = 0; 7284 switch (bp->mf_mode) { 7285 case MULTI_FUNCTION_SD: 7286 val = 1; 7287 break; 7288 case MULTI_FUNCTION_SI: 7289 case MULTI_FUNCTION_AFEX: 7290 val = 2; 7291 break; 7292 } 7293 7294 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE : 7295 NIG_REG_LLH0_CLS_TYPE), val); 7296 } 7297 { 7298 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 7299 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 7300 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 7301 } 7302 } 7303 7304 /* If SPIO5 is set to generate interrupts, enable it for this port */ 7305 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 7306 if (val & MISC_SPIO_SPIO5) { 7307 u32 reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 7308 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 7309 val = REG_RD(bp, reg_addr); 7310 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 7311 REG_WR(bp, reg_addr, val); 7312 } 7313 7314 return 0; 7315 } 7316 7317 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 7318 { 7319 int reg; 7320 u32 wb_write[2]; 7321 7322 if (CHIP_IS_E1(bp)) 7323 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 7324 else 7325 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 7326 7327 wb_write[0] = ONCHIP_ADDR1(addr); 7328 wb_write[1] = ONCHIP_ADDR2(addr); 7329 REG_WR_DMAE(bp, reg, wb_write, 2); 7330 } 7331 7332 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) 7333 { 7334 u32 data, ctl, cnt = 100; 7335 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 7336 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 7337 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 7338 u32 sb_bit = 1 << (idu_sb_id%32); 7339 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 7340 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 7341 7342 /* Not supported in BC mode */ 7343 if (CHIP_INT_MODE_IS_BC(bp)) 7344 return; 7345 7346 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 7347 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 7348 IGU_REGULAR_CLEANUP_SET | 7349 IGU_REGULAR_BCLEANUP; 7350 7351 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 7352 func_encode << IGU_CTRL_REG_FID_SHIFT | 7353 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 7354 7355 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7356 data, igu_addr_data); 7357 REG_WR(bp, igu_addr_data, data); 7358 mmiowb(); 7359 barrier(); 7360 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7361 ctl, igu_addr_ctl); 7362 REG_WR(bp, igu_addr_ctl, ctl); 7363 mmiowb(); 7364 barrier(); 7365 7366 /* wait for clean up to finish */ 7367 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 7368 msleep(20); 7369 7370 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 7371 DP(NETIF_MSG_HW, 7372 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", 7373 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 7374 } 7375 } 7376 7377 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 7378 { 7379 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7380 } 7381 7382 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7383 { 7384 u32 i, base = FUNC_ILT_BASE(func); 7385 for (i = base; i < base + ILT_PER_FUNC; i++) 7386 bnx2x_ilt_wr(bp, i, 0); 7387 } 7388 7389 static void bnx2x_init_searcher(struct bnx2x *bp) 7390 { 7391 int port = BP_PORT(bp); 7392 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7393 /* T1 hash bits value determines the T1 number of entries */ 7394 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7395 } 7396 7397 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) 7398 { 7399 int rc; 7400 struct bnx2x_func_state_params func_params = {NULL}; 7401 struct bnx2x_func_switch_update_params *switch_update_params = 7402 &func_params.params.switch_update; 7403 7404 /* Prepare parameters for function state transitions */ 7405 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 7406 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 7407 7408 func_params.f_obj = &bp->func_obj; 7409 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 7410 7411 /* Function parameters */ 7412 switch_update_params->suspend = suspend; 7413 7414 rc = bnx2x_func_state_change(bp, &func_params); 7415 7416 return rc; 7417 } 7418 7419 
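/* bnx2x_reset_nic_mode() - take the parser out of NIC mode so CNIC
 * (FCoE/iSCSI) connections can be classified. The flow below first fences
 * off traffic (rx filters or per-function NIG enables, plus a tx-switching
 * suspend ramrod) so nothing is in flight while PRS_REG_NIC_MODE is cleared.
 */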
static int bnx2x_reset_nic_mode(struct bnx2x *bp)
{
	int rc, i, port = BP_PORT(bp);
	int vlan_en = 0, mac_en[NUM_MACS];

	/* Close input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 0);
	} else {
		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
				   NIG_REG_LLH0_FUNC_EN);
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, 0);
		for (i = 0; i < NUM_MACS; i++) {
			mac_en[i] = REG_RD(bp, port ?
					     (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					      4 * i) :
					     (NIG_REG_LLH0_FUNC_MEM_ENABLE +
					      4 * i));
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
		}
	}

	/* Close BMC to host */
	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
		   NIG_REG_P0_TX_MNG_HOST_ENABLE, 0);

	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
	rc = bnx2x_func_switch_update(bp, 1);
	if (rc) {
		BNX2X_ERR("Can't suspend tx-switching!\n");
		return rc;
	}

	/* Change NIC_MODE register */
	REG_WR(bp, PRS_REG_NIC_MODE, 0);

	/* Open input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 1);
	} else {
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, vlan_en);
		for (i = 0; i < NUM_MACS; i++) {
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
				   mac_en[i]);
		}
	}

	/* Enable BMC to host */
	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
		   NIG_REG_P0_TX_MNG_HOST_ENABLE, 1);

	/* Resume Tx switching to the PF */
	rc = bnx2x_func_switch_update(bp, 0);
	if (rc) {
		BNX2X_ERR("Can't resume tx-switching!\n");
		return rc;
	}

	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	return 0;
}

int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
{
	int rc;

	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);

	if (CONFIGURE_NIC_MODE(bp)) {
		/* Configure searcher as part of function hw init */
		bnx2x_init_searcher(bp);

		/* Reset NIC mode */
		rc = bnx2x_reset_nic_mode(bp);
		if (rc)
			BNX2X_ERR("Can't change NIC mode!\n");
		return rc;
	}

	return 0;
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int init_phase = PHASE_PF0 + func;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width, rc;

	DP(NETIF_MSG_HW, "starting func init  func %d\n", func);

	/* FLR cleanup */
	if (!CHIP_IS_E1x(bp)) {
		rc = bnx2x_pf_flr_clnup(bp);
		if (rc) {
			bnx2x_fw_dump(bp);
			return rc;
		}
	}

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 7536 val = REG_RD(bp, addr); 7537 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 7538 REG_WR(bp, addr, val); 7539 } 7540 7541 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7542 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7543 7544 ilt = BP_ILT(bp); 7545 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7546 7547 if (IS_SRIOV(bp)) 7548 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS; 7549 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); 7550 7551 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes 7552 * those of the VFs, so start line should be reset 7553 */ 7554 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7555 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7556 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; 7557 ilt->lines[cdu_ilt_start + i].page_mapping = 7558 bp->context[i].cxt_mapping; 7559 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; 7560 } 7561 7562 bnx2x_ilt_init_op(bp, INITOP_SET); 7563 7564 if (!CONFIGURE_NIC_MODE(bp)) { 7565 bnx2x_init_searcher(bp); 7566 REG_WR(bp, PRS_REG_NIC_MODE, 0); 7567 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); 7568 } else { 7569 /* Set NIC mode */ 7570 REG_WR(bp, PRS_REG_NIC_MODE, 1); 7571 DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); 7572 } 7573 7574 if (!CHIP_IS_E1x(bp)) { 7575 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 7576 7577 /* Turn on a single ISR mode in IGU if driver is going to use 7578 * INT#x or MSI 7579 */ 7580 if (!(bp->flags & USING_MSIX_FLAG)) 7581 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 7582 /* 7583 * Timers workaround bug: function init part. 7584 * Need to wait 20msec after initializing ILT, 7585 * needed to make sure there are no requests in 7586 * one of the PXP internal queues with "old" ILT addresses 7587 */ 7588 msleep(20); 7589 /* 7590 * Master enable - Due to WB DMAE writes performed before this 7591 * register is re-initialized as part of the regular function 7592 * init 7593 */ 7594 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7595 /* Enable the function in IGU */ 7596 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); 7597 } 7598 7599 bp->dmae_ready = 1; 7600 7601 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7602 7603 if (!CHIP_IS_E1x(bp)) 7604 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 7605 7606 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7607 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7608 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7609 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7610 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7611 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7612 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7613 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7614 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7615 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7616 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7617 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7618 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7619 7620 if (!CHIP_IS_E1x(bp)) 7621 REG_WR(bp, QM_REG_PF_EN, 1); 7622 7623 if (!CHIP_IS_E1x(bp)) { 7624 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7625 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7626 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7627 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7628 } 7629 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7630 7631 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7632 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7633 7634 bnx2x_iov_init_dq(bp); 7635 7636 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 
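	/* Remaining datapath blocks take their per-function init values
	 * (init_phase == PHASE_PF0 + func) from the init arrays as well.
	 */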
7637 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7638 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7639 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7640 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7641 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7642 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7643 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7644 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7645 if (!CHIP_IS_E1x(bp)) 7646 REG_WR(bp, PBF_REG_DISABLE_PF, 0); 7647 7648 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7649 7650 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7651 7652 if (!CHIP_IS_E1x(bp)) 7653 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); 7654 7655 if (IS_MF(bp)) { 7656 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7657 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); 7658 } 7659 7660 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7661 7662 /* HC init per function */ 7663 if (bp->common.int_block == INT_BLOCK_HC) { 7664 if (CHIP_IS_E1H(bp)) { 7665 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7666 7667 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7668 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7669 } 7670 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7671 7672 } else { 7673 int num_segs, sb_idx, prod_offset; 7674 7675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7676 7677 if (!CHIP_IS_E1x(bp)) { 7678 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 7679 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 7680 } 7681 7682 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7683 7684 if (!CHIP_IS_E1x(bp)) { 7685 int dsb_idx = 0; 7686 /** 7687 * Producer memory: 7688 * E2 mode: address 0-135 match to the mapping memory; 7689 * 136 - PF0 default prod; 137 - PF1 default prod; 7690 * 138 - PF2 default prod; 139 - PF3 default prod; 7691 * 140 - PF0 attn prod; 141 - PF1 attn prod; 7692 * 142 - PF2 attn prod; 143 - PF3 attn prod; 7693 * 144-147 reserved. 7694 * 7695 * E1.5 mode - In backward compatible mode; 7696 * for non default SB; each even line in the memory 7697 * holds the U producer and each odd line hold 7698 * the C producer. The first 128 producers are for 7699 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 7700 * producers are for the DSB for each PF. 7701 * Each PF has five segments: (the order inside each 7702 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 7703 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 7704 * 144-147 attn prods; 7705 */ 7706 /* non-default-status-blocks */ 7707 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7708 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 7709 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { 7710 prod_offset = (bp->igu_base_sb + sb_idx) * 7711 num_segs; 7712 7713 for (i = 0; i < num_segs; i++) { 7714 addr = IGU_REG_PROD_CONS_MEMORY + 7715 (prod_offset + i) * 4; 7716 REG_WR(bp, addr, 0); 7717 } 7718 /* send consumer update with value 0 */ 7719 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, 7720 USTORM_ID, 0, IGU_INT_NOP, 1); 7721 bnx2x_igu_clear_sb(bp, 7722 bp->igu_base_sb + sb_idx); 7723 } 7724 7725 /* default-status-blocks */ 7726 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7727 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 7728 7729 if (CHIP_MODE_IS_4_PORT(bp)) 7730 dsb_idx = BP_FUNC(bp); 7731 else 7732 dsb_idx = BP_VN(bp); 7733 7734 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 
			IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * it does not matter what the current chip mode is
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! These should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm...
Parity errors in HC block during function init (0x%x)!\n", 7794 val); 7795 7796 /* Clear "false" parity errors in MSI-X table */ 7797 for (i = main_mem_base; 7798 i < main_mem_base + main_mem_size * 4; 7799 i += main_mem_width) { 7800 bnx2x_read_dmae(bp, i, main_mem_width / 4); 7801 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), 7802 i, main_mem_width / 4); 7803 } 7804 /* Clear HC parity attention */ 7805 REG_RD(bp, main_mem_prty_clr); 7806 } 7807 7808 #ifdef BNX2X_STOP_ON_ERROR 7809 /* Enable STORMs SP logging */ 7810 REG_WR8(bp, BAR_USTRORM_INTMEM + 7811 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7812 REG_WR8(bp, BAR_TSTRORM_INTMEM + 7813 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7814 REG_WR8(bp, BAR_CSTRORM_INTMEM + 7815 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7816 REG_WR8(bp, BAR_XSTRORM_INTMEM + 7817 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7818 #endif 7819 7820 bnx2x_phy_probe(&bp->link_params); 7821 7822 return 0; 7823 } 7824 7825 void bnx2x_free_mem_cnic(struct bnx2x *bp) 7826 { 7827 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); 7828 7829 if (!CHIP_IS_E1x(bp)) 7830 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, 7831 sizeof(struct host_hc_status_block_e2)); 7832 else 7833 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 7834 sizeof(struct host_hc_status_block_e1x)); 7835 7836 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 7837 } 7838 7839 void bnx2x_free_mem(struct bnx2x *bp) 7840 { 7841 int i; 7842 7843 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 7844 sizeof(struct host_sp_status_block)); 7845 7846 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 7847 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7848 7849 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 7850 sizeof(struct bnx2x_slowpath)); 7851 7852 for (i = 0; i < L2_ILT_LINES(bp); i++) 7853 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, 7854 bp->context[i].size); 7855 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 7856 7857 BNX2X_FREE(bp->ilt->lines); 7858 7859 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 7860 7861 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 7862 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7863 7864 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 7865 7866 bnx2x_iov_free_mem(bp); 7867 } 7868 7869 int bnx2x_alloc_mem_cnic(struct bnx2x *bp) 7870 { 7871 if (!CHIP_IS_E1x(bp)) 7872 /* size = the status block + ramrod buffers */ 7873 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, 7874 sizeof(struct host_hc_status_block_e2)); 7875 else 7876 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, 7877 &bp->cnic_sb_mapping, 7878 sizeof(struct 7879 host_hc_status_block_e1x)); 7880 7881 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) 7882 /* allocate searcher T2 table, as it wasn't allocated before */ 7883 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7884 7885 /* write address to which L5 should insert its values */ 7886 bp->cnic_eth_dev.addr_drv_info_to_mcp = 7887 &bp->slowpath->drv_info_to_mcp; 7888 7889 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) 7890 goto alloc_mem_err; 7891 7892 return 0; 7893 7894 alloc_mem_err: 7895 bnx2x_free_mem_cnic(bp); 7896 BNX2X_ERR("Can't allocate memory\n"); 7897 return -ENOMEM; 7898 } 7899 7900 int bnx2x_alloc_mem(struct bnx2x *bp) 7901 { 7902 int i, allocated, context_size; 7903 7904 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) 7905 /* allocate searcher T2 table */ 7906 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7907 7908 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 7909 sizeof(struct 
host_sp_status_block)); 7910 7911 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 7912 sizeof(struct bnx2x_slowpath)); 7913 7914 /* Allocate memory for CDU context: 7915 * This memory is allocated separately and not in the generic ILT 7916 * functions because CDU differs in few aspects: 7917 * 1. There are multiple entities allocating memory for context - 7918 * 'regular' driver, CNIC and SRIOV driver. Each separately controls 7919 * its own ILT lines. 7920 * 2. Since CDU page-size is not a single 4KB page (which is the case 7921 * for the other ILT clients), to be efficient we want to support 7922 * allocation of sub-page-size in the last entry. 7923 * 3. Context pointers are used by the driver to pass to FW / update 7924 * the context (for the other ILT clients the pointers are used just to 7925 * free the memory during unload). 7926 */ 7927 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 7928 7929 for (i = 0, allocated = 0; allocated < context_size; i++) { 7930 bp->context[i].size = min(CDU_ILT_PAGE_SZ, 7931 (context_size - allocated)); 7932 BNX2X_PCI_ALLOC(bp->context[i].vcxt, 7933 &bp->context[i].cxt_mapping, 7934 bp->context[i].size); 7935 allocated += bp->context[i].size; 7936 } 7937 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); 7938 7939 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 7940 goto alloc_mem_err; 7941 7942 if (bnx2x_iov_alloc_mem(bp)) 7943 goto alloc_mem_err; 7944 7945 /* Slow path ring */ 7946 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 7947 7948 /* EQ */ 7949 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 7950 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7951 7952 return 0; 7953 7954 alloc_mem_err: 7955 bnx2x_free_mem(bp); 7956 BNX2X_ERR("Can't allocate memory\n"); 7957 return -ENOMEM; 7958 } 7959 7960 /* 7961 * Init service functions 7962 */ 7963 7964 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, 7965 struct bnx2x_vlan_mac_obj *obj, bool set, 7966 int mac_type, unsigned long *ramrod_flags) 7967 { 7968 int rc; 7969 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 7970 7971 memset(&ramrod_param, 0, sizeof(ramrod_param)); 7972 7973 /* Fill general parameters */ 7974 ramrod_param.vlan_mac_obj = obj; 7975 ramrod_param.ramrod_flags = *ramrod_flags; 7976 7977 /* Fill a user request section if needed */ 7978 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 7979 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 7980 7981 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 7982 7983 /* Set the command: ADD or DEL */ 7984 if (set) 7985 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 7986 else 7987 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; 7988 } 7989 7990 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 7991 7992 if (rc == -EEXIST) { 7993 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); 7994 /* do not treat adding same MAC as error */ 7995 rc = 0; 7996 } else if (rc < 0) 7997 BNX2X_ERR("%s MAC failed\n", (set ? 
"Set" : "Del")); 7998 7999 return rc; 8000 } 8001 8002 int bnx2x_del_all_macs(struct bnx2x *bp, 8003 struct bnx2x_vlan_mac_obj *mac_obj, 8004 int mac_type, bool wait_for_comp) 8005 { 8006 int rc; 8007 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 8008 8009 /* Wait for completion of requested */ 8010 if (wait_for_comp) 8011 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8012 8013 /* Set the mac type of addresses we want to clear */ 8014 __set_bit(mac_type, &vlan_mac_flags); 8015 8016 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); 8017 if (rc < 0) 8018 BNX2X_ERR("Failed to delete MACs: %d\n", rc); 8019 8020 return rc; 8021 } 8022 8023 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) 8024 { 8025 if (is_zero_ether_addr(bp->dev->dev_addr) && 8026 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 8027 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 8028 "Ignoring Zero MAC for STORAGE SD mode\n"); 8029 return 0; 8030 } 8031 8032 if (IS_PF(bp)) { 8033 unsigned long ramrod_flags = 0; 8034 8035 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 8036 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8037 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, 8038 &bp->sp_objs->mac_obj, set, 8039 BNX2X_ETH_MAC, &ramrod_flags); 8040 } else { /* vf */ 8041 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, 8042 bp->fp->index, true); 8043 } 8044 } 8045 8046 int bnx2x_setup_leading(struct bnx2x *bp) 8047 { 8048 return bnx2x_setup_queue(bp, &bp->fp[0], 1); 8049 } 8050 8051 /** 8052 * bnx2x_set_int_mode - configure interrupt mode 8053 * 8054 * @bp: driver handle 8055 * 8056 * In case of MSI-X it will also try to enable MSI-X. 8057 */ 8058 int bnx2x_set_int_mode(struct bnx2x *bp) 8059 { 8060 int rc = 0; 8061 8062 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) 8063 return -EINVAL; 8064 8065 switch (int_mode) { 8066 case BNX2X_INT_MODE_MSIX: 8067 /* attempt to enable msix */ 8068 rc = bnx2x_enable_msix(bp); 8069 8070 /* msix attained */ 8071 if (!rc) 8072 return 0; 8073 8074 /* vfs use only msix */ 8075 if (rc && IS_VF(bp)) 8076 return rc; 8077 8078 /* failed to enable multiple MSI-X */ 8079 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 8080 bp->num_queues, 8081 1 + bp->num_cnic_queues); 8082 8083 /* falling through... */ 8084 case BNX2X_INT_MODE_MSI: 8085 bnx2x_enable_msi(bp); 8086 8087 /* falling through... 
*/ 8088 case BNX2X_INT_MODE_INTX: 8089 bp->num_ethernet_queues = 1; 8090 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 8091 BNX2X_DEV_INFO("set number of queues to 1\n"); 8092 break; 8093 default: 8094 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n"); 8095 return -EINVAL; 8096 } 8097 return 0; 8098 } 8099 8100 /* must be called prior to any HW initializations */ 8101 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) 8102 { 8103 if (IS_SRIOV(bp)) 8104 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS; 8105 return L2_ILT_LINES(bp); 8106 } 8107 8108 void bnx2x_ilt_set_info(struct bnx2x *bp) 8109 { 8110 struct ilt_client_info *ilt_client; 8111 struct bnx2x_ilt *ilt = BP_ILT(bp); 8112 u16 line = 0; 8113 8114 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); 8115 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); 8116 8117 /* CDU */ 8118 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 8119 ilt_client->client_num = ILT_CLIENT_CDU; 8120 ilt_client->page_size = CDU_ILT_PAGE_SZ; 8121 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 8122 ilt_client->start = line; 8123 line += bnx2x_cid_ilt_lines(bp); 8124 8125 if (CNIC_SUPPORT(bp)) 8126 line += CNIC_ILT_LINES; 8127 ilt_client->end = line - 1; 8128 8129 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8130 ilt_client->start, 8131 ilt_client->end, 8132 ilt_client->page_size, 8133 ilt_client->flags, 8134 ilog2(ilt_client->page_size >> 12)); 8135 8136 /* QM */ 8137 if (QM_INIT(bp->qm_cid_count)) { 8138 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 8139 ilt_client->client_num = ILT_CLIENT_QM; 8140 ilt_client->page_size = QM_ILT_PAGE_SZ; 8141 ilt_client->flags = 0; 8142 ilt_client->start = line; 8143 8144 /* 4 bytes for each cid */ 8145 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 8146 QM_ILT_PAGE_SZ); 8147 8148 ilt_client->end = line - 1; 8149 8150 DP(NETIF_MSG_IFUP, 8151 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8152 ilt_client->start, 8153 ilt_client->end, 8154 ilt_client->page_size, 8155 ilt_client->flags, 8156 ilog2(ilt_client->page_size >> 12)); 8157 } 8158 8159 if (CNIC_SUPPORT(bp)) { 8160 /* SRC */ 8161 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 8162 ilt_client->client_num = ILT_CLIENT_SRC; 8163 ilt_client->page_size = SRC_ILT_PAGE_SZ; 8164 ilt_client->flags = 0; 8165 ilt_client->start = line; 8166 line += SRC_ILT_LINES; 8167 ilt_client->end = line - 1; 8168 8169 DP(NETIF_MSG_IFUP, 8170 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8171 ilt_client->start, 8172 ilt_client->end, 8173 ilt_client->page_size, 8174 ilt_client->flags, 8175 ilog2(ilt_client->page_size >> 12)); 8176 8177 /* TM */ 8178 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 8179 ilt_client->client_num = ILT_CLIENT_TM; 8180 ilt_client->page_size = TM_ILT_PAGE_SZ; 8181 ilt_client->flags = 0; 8182 ilt_client->start = line; 8183 line += TM_ILT_LINES; 8184 ilt_client->end = line - 1; 8185 8186 DP(NETIF_MSG_IFUP, 8187 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8188 ilt_client->start, 8189 ilt_client->end, 8190 ilt_client->page_size, 8191 ilt_client->flags, 8192 ilog2(ilt_client->page_size >> 12)); 8193 } 8194 8195 BUG_ON(line > ILT_MAX_LINES); 8196 } 8197 8198 /** 8199 * bnx2x_pf_q_prep_init - prepare INIT transition parameters 8200 * 8201 * @bp: driver handle 8202 * @fp: pointer to fastpath 8203 * @init_params: pointer to parameters structure 8204 * 8205 * parameters configured: 8206 * - HC configuration 8207 * - 
Queue's CDU context
 */
static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
{
	u8 cos;
	int cxt_index, cxt_offset;

	/* FCoE Queue uses Default SB, thus has no HC capabilities */
	if (!IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);

		/* If HC is supported, enable host coalescing in the transition
		 * to INIT state.
		 */
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);

		/* HC rate */
		init_params->rx.hc_rate = bp->rx_ticks ?
			(1000000 / bp->rx_ticks) : 0;
		init_params->tx.hc_rate = bp->tx_ticks ?
			(1000000 / bp->tx_ticks) : 0;

		/* FW SB ID */
		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
			fp->fw_sb_id;

		/* CQ index among the SB indices: FCoE clients use the default
		 * SB, therefore theirs is different.
		 */
		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
	}

	/* set maximum number of COSs supported by this queue */
	init_params->max_cos = fp->max_cos;

	DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
	   fp->index, init_params->max_cos);

	/* set the context pointers queue object */
	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
		cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
		cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
				ILT_PAGE_CIDS);
		init_params->cxts[cos] =
			&bp->context[cxt_index].vcxt[cxt_offset].eth;
	}
}

static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			struct bnx2x_queue_state_params *q_params,
			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
			int tx_index, bool leading)
{
	memset(tx_only_params, 0, sizeof(*tx_only_params));

	/* Set the command */
	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;

	/* Set tx-only QUEUE flags: don't zero statistics */
	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);

	/* choose the index of the cid to send the slow path on */
	tx_only_params->cid_index = tx_index;

	/* Set general TX_ONLY_SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);

	/* Set Tx TX_ONLY_SETUP parameters */
	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);

	DP(NETIF_MSG_IFUP,
	   "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);

	/* send the ramrod */
	return bnx2x_queue_state_change(bp, q_params);
}

/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	set for the leading queue
 *
 * This function performs two transitions in the Queue state machine:
 * 1) RESET->INIT and 2) INIT->SETUP.
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_queue_setup_params *setup_params =
						&q_params.params.setup;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
						&q_params.params.tx_only;
	int rc;
	u8 tx_index;

	DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);

	/* reset IGU state; skip the FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Prepare the INIT parameters */
	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_INIT;

	/* Change the state to INIT */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "init complete\n");

	/* Now move the Queue to the SETUP state... */
	memset(setup_params, 0, sizeof(*setup_params));

	/* Set QUEUE flags */
	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);

	/* Set general SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
			   &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
			   FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_SETUP;

	if (IS_FCOE_FP(fp))
		bp->fcoe_init = true;

	/* Change the state to SETUP */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
		return rc;
	}

	/* loop through the relevant tx-only indices */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	      tx_index < fp->max_cos;
	      tx_index++) {

		/* prepare and send tx-only ramrod*/
		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
					  tx_only_params, tx_index, leading);
		if (rc) {
			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
				  fp->index, tx_index);
			return rc;
		}
	}

	return rc;
}

static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct bnx2x_fp_txdata *txdata;
	struct bnx2x_queue_state_params q_params = {NULL};
	int rc, tx_index;

	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);

	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* close tx-only connections */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++){

		/* ascertain this is a normal queue */
		txdata = fp->txdata_ptr[tx_index];

		DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
		   txdata->txq_index);

		/* send halt terminate on tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
		memset(&q_params.params.terminate, 0,
		       sizeof(q_params.params.terminate));
		q_params.params.terminate.cid_index = tx_index;

		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;

		/* send CFC delete on tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
		memset(&q_params.params.cfc_del, 0,
		       sizeof(q_params.params.cfc_del));
8425 q_params.params.cfc_del.cid_index = tx_index; 8426 rc = bnx2x_queue_state_change(bp, &q_params); 8427 if (rc) 8428 return rc; 8429 } 8430 /* Stop the primary connection: */ 8431 /* ...halt the connection */ 8432 q_params.cmd = BNX2X_Q_CMD_HALT; 8433 rc = bnx2x_queue_state_change(bp, &q_params); 8434 if (rc) 8435 return rc; 8436 8437 /* ...terminate the connection */ 8438 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8439 memset(&q_params.params.terminate, 0, 8440 sizeof(q_params.params.terminate)); 8441 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 8442 rc = bnx2x_queue_state_change(bp, &q_params); 8443 if (rc) 8444 return rc; 8445 /* ...delete cfc entry */ 8446 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8447 memset(&q_params.params.cfc_del, 0, 8448 sizeof(q_params.params.cfc_del)); 8449 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 8450 return bnx2x_queue_state_change(bp, &q_params); 8451 } 8452 8453 static void bnx2x_reset_func(struct bnx2x *bp) 8454 { 8455 int port = BP_PORT(bp); 8456 int func = BP_FUNC(bp); 8457 int i; 8458 8459 /* Disable the function in the FW */ 8460 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 8461 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 8462 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 8463 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 8464 8465 /* FP SBs */ 8466 for_each_eth_queue(bp, i) { 8467 struct bnx2x_fastpath *fp = &bp->fp[i]; 8468 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8469 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 8470 SB_DISABLED); 8471 } 8472 8473 if (CNIC_LOADED(bp)) 8474 /* CNIC SB */ 8475 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8476 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 8477 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED); 8478 8479 /* SP SB */ 8480 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8481 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 8482 SB_DISABLED); 8483 8484 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 8485 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 8486 0); 8487 8488 /* Configure IGU */ 8489 if (bp->common.int_block == INT_BLOCK_HC) { 8490 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 8491 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 8492 } else { 8493 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 8494 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8495 } 8496 8497 if (CNIC_LOADED(bp)) { 8498 /* Disable Timer scan */ 8499 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 8500 /* 8501 * Wait for at least 10ms and up to 2 second for the timers 8502 * scan to complete 8503 */ 8504 for (i = 0; i < 200; i++) { 8505 usleep_range(10000, 20000); 8506 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8507 break; 8508 } 8509 } 8510 /* Clear ILT */ 8511 bnx2x_clear_func_ilt(bp, func); 8512 8513 /* Timers workaround bug for E2: if this is vnic-3, 8514 * we need to set the entire ilt range for this timers. 
8515 */ 8516 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { 8517 struct ilt_client_info ilt_cli; 8518 /* use dummy TM client */ 8519 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 8520 ilt_cli.start = 0; 8521 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 8522 ilt_cli.client_num = ILT_CLIENT_TM; 8523 8524 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); 8525 } 8526 8527 /* this assumes that reset_port() called before reset_func()*/ 8528 if (!CHIP_IS_E1x(bp)) 8529 bnx2x_pf_disable(bp); 8530 8531 bp->dmae_ready = 0; 8532 } 8533 8534 static void bnx2x_reset_port(struct bnx2x *bp) 8535 { 8536 int port = BP_PORT(bp); 8537 u32 val; 8538 8539 /* Reset physical Link */ 8540 bnx2x__link_reset(bp); 8541 8542 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 8543 8544 /* Do not rcv packets to BRB */ 8545 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 8546 /* Do not direct rcv packets that are not for MCP to the BRB */ 8547 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 8548 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 8549 8550 /* Configure AEU */ 8551 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 8552 8553 msleep(100); 8554 /* Check for BRB port occupancy */ 8555 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 8556 if (val) 8557 DP(NETIF_MSG_IFDOWN, 8558 "BRB1 is not empty %d blocks are occupied\n", val); 8559 8560 /* TODO: Close Doorbell port? */ 8561 } 8562 8563 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 8564 { 8565 struct bnx2x_func_state_params func_params = {NULL}; 8566 8567 /* Prepare parameters for function state transitions */ 8568 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8569 8570 func_params.f_obj = &bp->func_obj; 8571 func_params.cmd = BNX2X_F_CMD_HW_RESET; 8572 8573 func_params.params.hw_init.load_phase = load_code; 8574 8575 return bnx2x_func_state_change(bp, &func_params); 8576 } 8577 8578 static int bnx2x_func_stop(struct bnx2x *bp) 8579 { 8580 struct bnx2x_func_state_params func_params = {NULL}; 8581 int rc; 8582 8583 /* Prepare parameters for function state transitions */ 8584 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8585 func_params.f_obj = &bp->func_obj; 8586 func_params.cmd = BNX2X_F_CMD_STOP; 8587 8588 /* 8589 * Try to stop the function the 'good way'. If fails (in case 8590 * of a parity error during bnx2x_chip_cleanup()) and we are 8591 * not in a debug mode, perform a state transaction in order to 8592 * enable further HW_RESET transaction. 8593 */ 8594 rc = bnx2x_func_state_change(bp, &func_params); 8595 if (rc) { 8596 #ifdef BNX2X_STOP_ON_ERROR 8597 return rc; 8598 #else 8599 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); 8600 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 8601 return bnx2x_func_state_change(bp, &func_params); 8602 #endif 8603 } 8604 8605 return 0; 8606 } 8607 8608 /** 8609 * bnx2x_send_unload_req - request unload mode from the MCP. 8610 * 8611 * @bp: driver handle 8612 * @unload_mode: requested function's unload mode 8613 * 8614 * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 8615 */ 8616 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) 8617 { 8618 u32 reset_code = 0; 8619 int port = BP_PORT(bp); 8620 8621 /* Select the UNLOAD request mode */ 8622 if (unload_mode == UNLOAD_NORMAL) 8623 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8624 8625 else if (bp->flags & NO_WOL_FLAG) 8626 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 8627 8628 else if (bp->wol) { 8629 u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		u16 pmc;

		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
		u8 entry = (BP_VN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		/* Enable the PME and clear the status */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Send the request to the MCP */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]--;
		load_count[path][1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[path][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	return reset_code;
}

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
{
	u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
}

static int bnx2x_func_wait_started(struct bnx2x *bp)
{
	int tout = 50;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (!bp->port.pmf)
		return 0;

	/*
	 * (assumption: No Attention from MCP at this stage)
	 * PMF is probably in the middle of a TX disable/enable transaction:
	 * 1. Sync IRQs for the default SB
	 * 2. Sync the SP queue - this guarantees that attention handling has
	 *    started
	 * 3. Wait until the TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if a DCBx attention was scheduled it has already
	 * changed the pending bit of the transaction from STARTED to
	 * TX_STOPPED; if we have already received completion for the
	 * transaction the state is TX_STOPPED. The state will return to
	 * STARTED after completion of the TX_STOPPED-->STARTED transaction.
	 */

	/* make sure default SB ISR is done */
	if (msix)
		synchronize_irq(bp->msix_table[0].vector);
	else
		synchronize_irq(bp->pdev->irq);

	flush_workqueue(bnx2x_wq);

	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
			BNX2X_F_STATE_STARTED && tout--)
		msleep(20);

	if (bnx2x_func_get_state(bp, &bp->func_obj) !=
			BNX2X_F_STATE_STARTED) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("Wrong function state\n");
		return -EBUSY;
#else
		/*
		 * Failed to complete the transaction in a "good way"
		 * Force both transactions with CLR bit
		 */
		struct bnx2x_func_state_params func_params = {NULL};

		DP(NETIF_MSG_IFDOWN,
		   "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");

		func_params.f_obj = &bp->func_obj;
		__set_bit(RAMROD_DRV_CLR_ONLY,
			  &func_params.ramrod_flags);

		/* STARTED-->TX_STOPPED */
		func_params.cmd = BNX2X_F_CMD_TX_STOP;
		bnx2x_func_state_change(bp, &func_params);

		/* TX_STOPPED-->STARTED */
		func_params.cmd = BNX2X_F_CMD_TX_START;
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}

void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
{
	int port = BP_PORT(bp);
	int i, rc = 0;
	u8 cos;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	u32 reset_code;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
#ifdef BNX2X_STOP_ON_ERROR
		if (rc)
			return;
#endif
	}

	/* Give HW time to discard old tx messages */
	usleep_range(1000, 2000);

	/* Clean all ETH MACs */
	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
				false);
	if (rc < 0)
		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);

	/* Clean up UC list */
	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
				true);
	if (rc < 0)
		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
			  rc);

	/* Disable LLH */
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	/* Set "drop all" (stop Rx).
	 * We need to take a netif_addr_lock() here in order to prevent
	 * a race between the completion code and this code.
	 */
	netif_addr_lock_bh(bp->dev);
	/* Schedule the rx_mode command */
	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
	else
		bnx2x_set_storm_rx_mode(bp);

	/* Cleanup multicast configuration */
	rparam.mcast_obj = &bp->mcast_obj;
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);

	netif_addr_unlock_bh(bp->dev);

	bnx2x_iov_chip_cleanup(bp);

	/*
	 * Send the UNLOAD_REQUEST to the MCP. This will return if
	 * this function should perform FUNC, PORT or COMMON HW
	 * reset.
8827 */ 8828 reset_code = bnx2x_send_unload_req(bp, unload_mode); 8829 8830 /* 8831 * (assumption: No Attention from MCP at this stage) 8832 * PMF probably in the middle of TX disable/enable transaction 8833 */ 8834 rc = bnx2x_func_wait_started(bp); 8835 if (rc) { 8836 BNX2X_ERR("bnx2x_func_wait_started failed\n"); 8837 #ifdef BNX2X_STOP_ON_ERROR 8838 return; 8839 #endif 8840 } 8841 8842 /* Close multi and leading connections 8843 * Completions for ramrods are collected in a synchronous way 8844 */ 8845 for_each_eth_queue(bp, i) 8846 if (bnx2x_stop_queue(bp, i)) 8847 #ifdef BNX2X_STOP_ON_ERROR 8848 return; 8849 #else 8850 goto unload_error; 8851 #endif 8852 8853 if (CNIC_LOADED(bp)) { 8854 for_each_cnic_queue(bp, i) 8855 if (bnx2x_stop_queue(bp, i)) 8856 #ifdef BNX2X_STOP_ON_ERROR 8857 return; 8858 #else 8859 goto unload_error; 8860 #endif 8861 } 8862 8863 /* If SP settings didn't get completed so far - something 8864 * very wrong has happen. 8865 */ 8866 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) 8867 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); 8868 8869 #ifndef BNX2X_STOP_ON_ERROR 8870 unload_error: 8871 #endif 8872 rc = bnx2x_func_stop(bp); 8873 if (rc) { 8874 BNX2X_ERR("Function stop failed!\n"); 8875 #ifdef BNX2X_STOP_ON_ERROR 8876 return; 8877 #endif 8878 } 8879 8880 /* Disable HW interrupts, NAPI */ 8881 bnx2x_netif_stop(bp, 1); 8882 /* Delete all NAPI objects */ 8883 bnx2x_del_all_napi(bp); 8884 if (CNIC_LOADED(bp)) 8885 bnx2x_del_all_napi_cnic(bp); 8886 8887 /* Release IRQs */ 8888 bnx2x_free_irq(bp); 8889 8890 /* Reset the chip */ 8891 rc = bnx2x_reset_hw(bp, reset_code); 8892 if (rc) 8893 BNX2X_ERR("HW_RESET failed\n"); 8894 8895 /* Report UNLOAD_DONE to MCP */ 8896 bnx2x_send_unload_done(bp, keep_link); 8897 } 8898 8899 void bnx2x_disable_close_the_gate(struct bnx2x *bp) 8900 { 8901 u32 val; 8902 8903 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); 8904 8905 if (CHIP_IS_E1(bp)) { 8906 int port = BP_PORT(bp); 8907 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8908 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8909 8910 val = REG_RD(bp, addr); 8911 val &= ~(0x300); 8912 REG_WR(bp, addr, val); 8913 } else { 8914 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); 8915 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 8916 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 8917 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); 8918 } 8919 } 8920 8921 /* Close gates #2, #3 and #4: */ 8922 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 8923 { 8924 u32 val; 8925 8926 /* Gates #2 and #4a are closed/opened for "not E1" only */ 8927 if (!CHIP_IS_E1(bp)) { 8928 /* #4 */ 8929 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 8930 /* #2 */ 8931 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 8932 } 8933 8934 /* #3 */ 8935 if (CHIP_IS_E1x(bp)) { 8936 /* Prevent interrupts from HC on both ports */ 8937 val = REG_RD(bp, HC_REG_CONFIG_1); 8938 REG_WR(bp, HC_REG_CONFIG_1, 8939 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 8940 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 8941 8942 val = REG_RD(bp, HC_REG_CONFIG_0); 8943 REG_WR(bp, HC_REG_CONFIG_0, 8944 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 8945 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 8946 } else { 8947 /* Prevent incoming interrupts in IGU */ 8948 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 8949 8950 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, 8951 (!close) ? 
8952 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 8953 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 8954 } 8955 8956 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", 8957 close ? "closing" : "opening"); 8958 mmiowb(); 8959 } 8960 8961 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ 8962 8963 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) 8964 { 8965 /* Do some magic... */ 8966 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 8967 *magic_val = val & SHARED_MF_CLP_MAGIC; 8968 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 8969 } 8970 8971 /** 8972 * bnx2x_clp_reset_done - restore the value of the `magic' bit. 8973 * 8974 * @bp: driver handle 8975 * @magic_val: old value of the `magic' bit. 8976 */ 8977 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 8978 { 8979 /* Restore the `magic' bit value... */ 8980 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 8981 MF_CFG_WR(bp, shared_mf_config.clp_mb, 8982 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 8983 } 8984 8985 /** 8986 * bnx2x_reset_mcp_prep - prepare for MCP reset. 8987 * 8988 * @bp: driver handle 8989 * @magic_val: old value of 'magic' bit. 8990 * 8991 * Takes care of CLP configurations. 8992 */ 8993 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 8994 { 8995 u32 shmem; 8996 u32 validity_offset; 8997 8998 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); 8999 9000 /* Set `magic' bit in order to save MF config */ 9001 if (!CHIP_IS_E1(bp)) 9002 bnx2x_clp_reset_prep(bp, magic_val); 9003 9004 /* Get shmem offset */ 9005 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9006 validity_offset = 9007 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); 9008 9009 /* Clear validity map flags */ 9010 if (shmem > 0) 9011 REG_WR(bp, shmem + validity_offset, 0); 9012 } 9013 9014 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 9015 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 9016 9017 /** 9018 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT 9019 * 9020 * @bp: driver handle 9021 */ 9022 static void bnx2x_mcp_wait_one(struct bnx2x *bp) 9023 { 9024 /* special handling for emulation and FPGA, 9025 wait 10 times longer */ 9026 if (CHIP_REV_IS_SLOW(bp)) 9027 msleep(MCP_ONE_TIMEOUT*10); 9028 else 9029 msleep(MCP_ONE_TIMEOUT); 9030 } 9031 9032 /* 9033 * initializes bp->common.shmem_base and waits for validity signature to appear 9034 */ 9035 static int bnx2x_init_shmem(struct bnx2x *bp) 9036 { 9037 int cnt = 0; 9038 u32 val = 0; 9039 9040 do { 9041 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9042 if (bp->common.shmem_base) { 9043 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9044 if (val & SHR_MEM_VALIDITY_MB) 9045 return 0; 9046 } 9047 9048 bnx2x_mcp_wait_one(bp); 9049 9050 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 9051 9052 BNX2X_ERR("BAD MCP validity signature\n"); 9053 9054 return -ENODEV; 9055 } 9056 9057 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 9058 { 9059 int rc = bnx2x_init_shmem(bp); 9060 9061 /* Restore the `magic' bit value */ 9062 if (!CHIP_IS_E1(bp)) 9063 bnx2x_clp_reset_done(bp, magic_val); 9064 9065 return rc; 9066 } 9067 9068 static void bnx2x_pxp_prep(struct bnx2x *bp) 9069 { 9070 if (!CHIP_IS_E1(bp)) { 9071 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); 9072 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); 9073 mmiowb(); 9074 } 9075 } 9076 9077 /* 9078 * Reset the whole chip except for: 9079 * - PCIE core 9080 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by 9081 * one reset bit) 9082 * - IGU 
9083 * - MISC (including AEU) 9084 * - GRC 9085 * - RBCN, RBCP 9086 */ 9087 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) 9088 { 9089 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 9090 u32 global_bits2, stay_reset2; 9091 9092 /* 9093 * Bits that have to be set in reset_mask2 if we want to reset 'global' 9094 * (per chip) blocks. 9095 */ 9096 global_bits2 = 9097 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 9098 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 9099 9100 /* Don't reset the following blocks. 9101 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 9102 * reset, as in 4 port device they might still be owned 9103 * by the MCP (there is only one leader per path). 9104 */ 9105 not_reset_mask1 = 9106 MISC_REGISTERS_RESET_REG_1_RST_HC | 9107 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 9108 MISC_REGISTERS_RESET_REG_1_RST_PXP; 9109 9110 not_reset_mask2 = 9111 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 9112 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 9113 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 9114 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 9115 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 9116 MISC_REGISTERS_RESET_REG_2_RST_GRC | 9117 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 9118 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 9119 MISC_REGISTERS_RESET_REG_2_RST_ATC | 9120 MISC_REGISTERS_RESET_REG_2_PGLC | 9121 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 9122 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 9123 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 9124 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 9125 MISC_REGISTERS_RESET_REG_2_UMAC0 | 9126 MISC_REGISTERS_RESET_REG_2_UMAC1; 9127 9128 /* 9129 * Keep the following blocks in reset: 9130 * - all xxMACs are handled by the bnx2x_link code. 9131 */ 9132 stay_reset2 = 9133 MISC_REGISTERS_RESET_REG_2_XMAC | 9134 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 9135 9136 /* Full reset masks according to the chip */ 9137 reset_mask1 = 0xffffffff; 9138 9139 if (CHIP_IS_E1(bp)) 9140 reset_mask2 = 0xffff; 9141 else if (CHIP_IS_E1H(bp)) 9142 reset_mask2 = 0x1ffff; 9143 else if (CHIP_IS_E2(bp)) 9144 reset_mask2 = 0xfffff; 9145 else /* CHIP_IS_E3 */ 9146 reset_mask2 = 0x3ffffff; 9147 9148 /* Don't reset global blocks unless we need to */ 9149 if (!global) 9150 reset_mask2 &= ~global_bits2; 9151 9152 /* 9153 * In case of attention in the QM, we need to reset PXP 9154 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 9155 * because otherwise QM reset would release 'close the gates' shortly 9156 * before resetting the PXP, then the PSWRQ would send a write 9157 * request to PGLUE. Then when PXP is reset, PGLUE would try to 9158 * read the payload data from PSWWR, but PSWWR would not 9159 * respond. The write queue in PGLUE would stuck, dmae commands 9160 * would not return. Therefore it's important to reset the second 9161 * reset register (containing the 9162 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 9163 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 9164 * bit). 
	 */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       reset_mask2 & (~stay_reset2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	mmiowb();
}

/**
 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
 *
 * @bp:	driver handle
 *
 * The bit should get cleared in no more than 1s. Returns 0 if
 * the pending writes bit gets cleared.
 */
static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
{
	u32 cnt = 1000;
	u32 pend_bits = 0;

	do {
		pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);

		if (pend_bits == 0)
			break;

		usleep_range(1000, 2000);
	} while (cnt-- > 0);

	if (pend_bits) {
		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
			  pend_bits);
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_process_kill(struct bnx2x *bp, bool global)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
	u32 tags_63_32 = 0;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if (CHIP_IS_E3(bp))
			tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);

		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff) &&
		    (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
			break;
		usleep_range(1000, 2000);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
		BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* Poll for IGU VQs for 57712 and newer chips */
	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
		return -EAGAIN;

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	usleep_range(1000, 2000);

	/* Prepare for chip reset: */
	/* MCP */
	if (global)
		bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp, global);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (global && bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* TBD: Add resetting the NO_MCP mode DB here */

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation - bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}

static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	bool global = bnx2x_reset_is_global(bp);
	u32 load_code;

	/* If not going to reset the MCP - load a "fake" driver to reset
	 * the HW while this driver is the owner of the HW.
	 */
	if (!global && !BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
					     DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset;
		}
		if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
		    (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
			BNX2X_ERR("MCP unexpected resp, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset2;
		}
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset2;
		}
	}

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d!\n",
			  BP_PATH(bp));
		rc = -EAGAIN;
		goto exit_leader_reset2;
	}

	/*
	 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
	 * state.
	 */
	bnx2x_set_reset_done(bp);
	if (global)
		bnx2x_clear_reset_global(bp);

exit_leader_reset2:
	/* unload the "fake" driver if it was loaded */
	if (!global && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}
exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_leader_lock(bp);
	smp_mb();
	return rc;
}

static void bnx2x_recovery_failed(struct bnx2x *bp)
{
	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");

	/* Disconnect this device */
	netif_device_detach(bp->dev);

	/*
	 * Block ifup for all functions on this engine until a "process kill"
	 * or a power cycle.
	 */
	bnx2x_set_reset_in_progress(bp);

	/* Shut down the power */
	bnx2x_set_power_state(bp, PCI_D3hot);

	bp->recovery_state = BNX2X_RECOVERY_FAILED;

	smp_mb();
}

/*
 * Assumption: runs under the rtnl lock. This, together with the fact
 * that it's called only from bnx2x_sp_rtnl(), ensures that it
 * will never be called when netif_running(bp->dev) is false.
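 *
 * (A caller therefore looks roughly like:
 *
 *	rtnl_lock();
 *	if (netif_running(bp->dev))
 *		bnx2x_parity_recover(bp);
 *	rtnl_unlock();
 *
 * which is effectively what bnx2x_sp_rtnl_task() below does.)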
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	bool global = false;
	u32 error_recovered, error_unrecovered;
	bool is_parity;

	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			is_parity = bnx2x_chk_parity_attn(bp, &global, false);
			WARN_ON(!is_parity);

			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_leader_lock(bp)) {
				bnx2x_set_reset_in_progress(bp);
				/*
				 * If there was a global attention, set the
				 * global reset bit.
				 */
				if (global)
					bnx2x_set_reset_global(bp);

				bp->is_leader = 1;
			}

			/* Stop the driver */
			/* If the interface has been removed - return */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;

			/* Ensure "is_leader", MCP command sequence and
			 * "recovery_state" update values are seen on other
			 * CPUs.
			 */
			smp_mb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				int other_engine = BP_PATH(bp) ? 0 : 1;
				bool other_load_status =
					bnx2x_get_load_status(bp, other_engine);
				bool load_status =
					bnx2x_get_load_status(bp, BP_PATH(bp));
				global = bnx2x_reset_is_global(bp);

				/*
				 * In case of a parity in a global block, let
				 * the first leader that performs a
				 * leader_reset() reset the global blocks in
				 * order to clear global attentions. Otherwise
				 * the gates will remain closed for that
				 * engine.
				 */
				if (load_status ||
				    (global && other_load_status)) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp)) {
						bnx2x_recovery_failed(bp);
						return;
					}

					/* If we are here, it means that the
					 * leader has succeeded and doesn't
					 * want to be a leader any more. Try
					 * to continue as a non-leader.
					 */
					break;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
					/* Try to get a LEADER_LOCK HW lock,
					 * since a former leader may have
					 * been unloaded by the user or
					 * released leadership for some other
					 * reason.
					 */
					if (bnx2x_trylock_leader_lock(bp)) {
						/* I'm a leader now! Restart
						 * the switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;

				} else {
					/*
					 * If there was a global attention, wait
					 * for it to be cleared.
					 */
					if (bnx2x_reset_is_global(bp)) {
						schedule_delayed_work(
							&bp->sp_rtnl_task,
							HZ/10);
						return;
					}

					error_recovered =
						bp->eth_stats.recoverable_error;
					error_unrecovered =
						bp->eth_stats.unrecoverable_error;
					bp->recovery_state =
						BNX2X_RECOVERY_NIC_LOADING;
					if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
						error_unrecovered++;
						netdev_err(bp->dev,
							   "Recovery failed.
Power cycle needed\n");
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Shut down the power */
						bnx2x_set_power_state(
							bp, PCI_D3hot);
						smp_mb();
					} else {
						bp->recovery_state =
							BNX2X_RECOVERY_DONE;
						error_recovered++;
						smp_mb();
					}
					bp->eth_stats.recoverable_error =
						error_recovered;
					bp->eth_stats.unrecoverable_error =
						error_unrecovered;

					return;
				}
			}
		default:
			return;
		}
	}
}

static int bnx2x_close(struct net_device *dev);

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);

	rtnl_lock();

	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
			  "you will need to reboot when done\n");
		goto sp_rtnl_not_reset;
#endif
		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		bnx2x_parity_recover(bp);

		rtnl_unlock();
		return;
	}

	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
			  "you will need to reboot when done\n");
		goto sp_rtnl_not_reset;
#endif

		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
		bnx2x_nic_load(bp, LOAD_NORMAL);

		rtnl_unlock();
		return;
	}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
#endif
	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
		bnx2x_after_function_update(bp);
	/*
	 * In case of fan failure we need to unload the driver even if the
	 * "stop on error" debug flag is set, since we are trying to prevent
	 * permanent overheating damage.
	 */
	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
		DP(NETIF_MSG_HW, "fan failure detected.
Unloading driver\n"); 9613 netif_device_detach(bp->dev); 9614 bnx2x_close(bp->dev); 9615 rtnl_unlock(); 9616 return; 9617 } 9618 9619 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { 9620 DP(BNX2X_MSG_SP, 9621 "sending set mcast vf pf channel message from rtnl sp-task\n"); 9622 bnx2x_vfpf_set_mcast(bp->dev); 9623 } 9624 9625 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 9626 &bp->sp_rtnl_state)) { 9627 DP(BNX2X_MSG_SP, 9628 "sending set storm rx mode vf pf channel message from rtnl sp-task\n"); 9629 bnx2x_vfpf_storm_rx_mode(bp); 9630 } 9631 9632 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, 9633 &bp->sp_rtnl_state)) 9634 bnx2x_pf_set_vfs_vlan(bp); 9635 9636 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9637 * can be called from other contexts as well) 9638 */ 9639 rtnl_unlock(); 9640 9641 /* enable SR-IOV if applicable */ 9642 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, 9643 &bp->sp_rtnl_state)) { 9644 bnx2x_disable_sriov(bp); 9645 bnx2x_enable_sriov(bp); 9646 } 9647 } 9648 9649 static void bnx2x_period_task(struct work_struct *work) 9650 { 9651 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); 9652 9653 if (!netif_running(bp->dev)) 9654 goto period_task_exit; 9655 9656 if (CHIP_REV_IS_SLOW(bp)) { 9657 BNX2X_ERR("period task called on emulation, ignoring\n"); 9658 goto period_task_exit; 9659 } 9660 9661 bnx2x_acquire_phy_lock(bp); 9662 /* 9663 * The barrier is needed to ensure the ordering between the writing to 9664 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 9665 * the reading here. 9666 */ 9667 smp_mb(); 9668 if (bp->port.pmf) { 9669 bnx2x_period_func(&bp->link_params, &bp->link_vars); 9670 9671 /* Re-queue task in 1 sec */ 9672 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); 9673 } 9674 9675 bnx2x_release_phy_lock(bp); 9676 period_task_exit: 9677 return; 9678 } 9679 9680 /* 9681 * Init service functions 9682 */ 9683 9684 u32 bnx2x_get_pretend_reg(struct bnx2x *bp) 9685 { 9686 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; 9687 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; 9688 return base + (BP_ABS_FUNC(bp)) * stride; 9689 } 9690 9691 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, 9692 struct bnx2x_mac_vals *vals) 9693 { 9694 u32 val, base_addr, offset, mask, reset_reg; 9695 bool mac_stopped = false; 9696 u8 port = BP_PORT(bp); 9697 9698 /* reset addresses as they also mark which values were changed */ 9699 vals->bmac_addr = 0; 9700 vals->umac_addr = 0; 9701 vals->xmac_addr = 0; 9702 vals->emac_addr = 0; 9703 9704 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 9705 9706 if (!CHIP_IS_E3(bp)) { 9707 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 9708 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 9709 if ((mask & reset_reg) && val) { 9710 u32 wb_data[2]; 9711 BNX2X_DEV_INFO("Disable bmac Rx\n"); 9712 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM 9713 : NIG_REG_INGRESS_BMAC0_MEM; 9714 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL 9715 : BIGMAC_REGISTER_BMAC_CONTROL; 9716 9717 /* 9718 * use rd/wr since we cannot use dmae. This is safe 9719 * since MCP won't access the bus due to the request 9720 * to unload, and no function on the path can be 9721 * loaded at this time. 
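	 *
	 * (The BMAC control register is a 64-bit register pair, which is
	 * why it is read back below as two 32-bit halves - at offset and
	 * at offset + 0x4 - rather than with one wide DMAE transaction.)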
9722 */ 9723 wb_data[0] = REG_RD(bp, base_addr + offset); 9724 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); 9725 vals->bmac_addr = base_addr + offset; 9726 vals->bmac_val[0] = wb_data[0]; 9727 vals->bmac_val[1] = wb_data[1]; 9728 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 9729 REG_WR(bp, vals->bmac_addr, wb_data[0]); 9730 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); 9731 } 9732 BNX2X_DEV_INFO("Disable emac Rx\n"); 9733 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; 9734 vals->emac_val = REG_RD(bp, vals->emac_addr); 9735 REG_WR(bp, vals->emac_addr, 0); 9736 mac_stopped = true; 9737 } else { 9738 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 9739 BNX2X_DEV_INFO("Disable xmac Rx\n"); 9740 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 9741 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); 9742 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 9743 val & ~(1 << 1)); 9744 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 9745 val | (1 << 1)); 9746 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 9747 vals->xmac_val = REG_RD(bp, vals->xmac_addr); 9748 REG_WR(bp, vals->xmac_addr, 0); 9749 mac_stopped = true; 9750 } 9751 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 9752 if (mask & reset_reg) { 9753 BNX2X_DEV_INFO("Disable umac Rx\n"); 9754 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 9755 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 9756 vals->umac_val = REG_RD(bp, vals->umac_addr); 9757 REG_WR(bp, vals->umac_addr, 0); 9758 mac_stopped = true; 9759 } 9760 } 9761 9762 if (mac_stopped) 9763 msleep(20); 9764 } 9765 9766 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 9767 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 9768 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 9769 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 9770 9771 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) 9772 { 9773 u16 rcq, bd; 9774 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); 9775 9776 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 9777 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 9778 9779 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 9780 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); 9781 9782 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 9783 port, bd, rcq); 9784 } 9785 9786 static int bnx2x_prev_mcp_done(struct bnx2x *bp) 9787 { 9788 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 9789 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 9790 if (!rc) { 9791 BNX2X_ERR("MCP response failure, aborting\n"); 9792 return -EBUSY; 9793 } 9794 9795 return 0; 9796 } 9797 9798 static struct bnx2x_prev_path_list * 9799 bnx2x_prev_path_get_entry(struct bnx2x *bp) 9800 { 9801 struct bnx2x_prev_path_list *tmp_list; 9802 9803 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) 9804 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 9805 bp->pdev->bus->number == tmp_list->bus && 9806 BP_PATH(bp) == tmp_list->path) 9807 return tmp_list; 9808 9809 return NULL; 9810 } 9811 9812 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) 9813 { 9814 struct bnx2x_prev_path_list *tmp_list; 9815 int rc; 9816 9817 rc = down_interruptible(&bnx2x_prev_sem); 9818 if (rc) { 9819 BNX2X_ERR("Received %d when tried to take lock\n", rc); 9820 return rc; 9821 } 9822 9823 tmp_list = bnx2x_prev_path_get_entry(bp); 9824 if (tmp_list) { 9825 tmp_list->aer = 1; 9826 rc = 0; 9827 } else { 9828 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", 9829 BP_PATH(bp)); 9830 } 
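	/* done with the previous-path list - drop the lock taken above */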
9831 9832 up(&bnx2x_prev_sem); 9833 9834 return rc; 9835 } 9836 9837 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) 9838 { 9839 struct bnx2x_prev_path_list *tmp_list; 9840 int rc = false; 9841 9842 if (down_trylock(&bnx2x_prev_sem)) 9843 return false; 9844 9845 tmp_list = bnx2x_prev_path_get_entry(bp); 9846 if (tmp_list) { 9847 if (tmp_list->aer) { 9848 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", 9849 BP_PATH(bp)); 9850 } else { 9851 rc = true; 9852 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 9853 BP_PATH(bp)); 9854 } 9855 } 9856 9857 up(&bnx2x_prev_sem); 9858 9859 return rc; 9860 } 9861 9862 bool bnx2x_port_after_undi(struct bnx2x *bp) 9863 { 9864 struct bnx2x_prev_path_list *entry; 9865 bool val; 9866 9867 down(&bnx2x_prev_sem); 9868 9869 entry = bnx2x_prev_path_get_entry(bp); 9870 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); 9871 9872 up(&bnx2x_prev_sem); 9873 9874 return val; 9875 } 9876 9877 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) 9878 { 9879 struct bnx2x_prev_path_list *tmp_list; 9880 int rc; 9881 9882 rc = down_interruptible(&bnx2x_prev_sem); 9883 if (rc) { 9884 BNX2X_ERR("Received %d when tried to take lock\n", rc); 9885 return rc; 9886 } 9887 9888 /* Check whether the entry for this path already exists */ 9889 tmp_list = bnx2x_prev_path_get_entry(bp); 9890 if (tmp_list) { 9891 if (!tmp_list->aer) { 9892 BNX2X_ERR("Re-Marking the path.\n"); 9893 } else { 9894 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", 9895 BP_PATH(bp)); 9896 tmp_list->aer = 0; 9897 } 9898 up(&bnx2x_prev_sem); 9899 return 0; 9900 } 9901 up(&bnx2x_prev_sem); 9902 9903 /* Create an entry for this path and add it */ 9904 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 9905 if (!tmp_list) { 9906 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); 9907 return -ENOMEM; 9908 } 9909 9910 tmp_list->bus = bp->pdev->bus->number; 9911 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 9912 tmp_list->path = BP_PATH(bp); 9913 tmp_list->aer = 0; 9914 tmp_list->undi = after_undi ? 
(1 << BP_PORT(bp)) : 0; 9915 9916 rc = down_interruptible(&bnx2x_prev_sem); 9917 if (rc) { 9918 BNX2X_ERR("Received %d when tried to take lock\n", rc); 9919 kfree(tmp_list); 9920 } else { 9921 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", 9922 BP_PATH(bp)); 9923 list_add(&tmp_list->list, &bnx2x_prev_list); 9924 up(&bnx2x_prev_sem); 9925 } 9926 9927 return rc; 9928 } 9929 9930 static int bnx2x_do_flr(struct bnx2x *bp) 9931 { 9932 int i; 9933 u16 status; 9934 struct pci_dev *dev = bp->pdev; 9935 9936 if (CHIP_IS_E1x(bp)) { 9937 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); 9938 return -EINVAL; 9939 } 9940 9941 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 9942 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 9943 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", 9944 bp->common.bc_ver); 9945 return -EINVAL; 9946 } 9947 9948 /* Wait for Transaction Pending bit clean */ 9949 for (i = 0; i < 4; i++) { 9950 if (i) 9951 msleep((1 << (i - 1)) * 100); 9952 9953 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 9954 if (!(status & PCI_EXP_DEVSTA_TRPND)) 9955 goto clear; 9956 } 9957 9958 dev_err(&dev->dev, 9959 "transaction is not cleared; proceeding with reset anyway\n"); 9960 9961 clear: 9962 9963 BNX2X_DEV_INFO("Initiating FLR\n"); 9964 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 9965 9966 return 0; 9967 } 9968 9969 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) 9970 { 9971 int rc; 9972 9973 BNX2X_DEV_INFO("Uncommon unload Flow\n"); 9974 9975 /* Test if previous unload process was already finished for this path */ 9976 if (bnx2x_prev_is_path_marked(bp)) 9977 return bnx2x_prev_mcp_done(bp); 9978 9979 BNX2X_DEV_INFO("Path is unmarked\n"); 9980 9981 /* If function has FLR capabilities, and existing FW version matches 9982 * the one required, then FLR will be sufficient to clean any residue 9983 * left by previous driver 9984 */ 9985 rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION); 9986 9987 if (!rc) { 9988 /* fw version is good */ 9989 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n"); 9990 rc = bnx2x_do_flr(bp); 9991 } 9992 9993 if (!rc) { 9994 /* FLR was performed */ 9995 BNX2X_DEV_INFO("FLR successful\n"); 9996 return 0; 9997 } 9998 9999 BNX2X_DEV_INFO("Could not FLR\n"); 10000 10001 /* Close the MCP request, return failure*/ 10002 rc = bnx2x_prev_mcp_done(bp); 10003 if (!rc) 10004 rc = BNX2X_PREV_WAIT_NEEDED; 10005 10006 return rc; 10007 } 10008 10009 static int bnx2x_prev_unload_common(struct bnx2x *bp) 10010 { 10011 u32 reset_reg, tmp_reg = 0, rc; 10012 bool prev_undi = false; 10013 struct bnx2x_mac_vals mac_vals; 10014 10015 /* It is possible a previous function received 'common' answer, 10016 * but hasn't loaded yet, therefore creating a scenario of 10017 * multiple functions receiving 'common' on the same path. 
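 * (The bnx2x_prev_is_path_marked() check below keeps this harmless:
 * once one function has cleaned the path and marked it, later callers
 * just complete their MCP handshake and return.)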
	 */
	BNX2X_DEV_INFO("Common unload Flow\n");

	memset(&mac_vals, 0, sizeof(mac_vals));

	if (bnx2x_prev_is_path_marked(bp))
		return bnx2x_prev_mcp_done(bp);

	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);

	/* Reset should be performed after BRB is emptied */
	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
		u32 timer_count = 1000;

		/* Close the MAC Rx to prevent BRB from filling up */
		bnx2x_prev_unload_close_mac(bp, &mac_vals);

		/* close LLH filters towards the BRB */
		bnx2x_set_rx_filter(&bp->link_params, 0);

		/* Check if the UNDI driver was previously loaded.
		 * The UNDI driver initializes the CID offset for the normal
		 * bell to 0x7.
		 */
		if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
			tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
			if (tmp_reg == 0x7) {
				BNX2X_DEV_INFO("UNDI previously loaded\n");
				prev_undi = true;
				/* clear the UNDI indication */
				REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
				/* clear possible idle check errors */
				REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
			}
		}
		if (!CHIP_IS_E1x(bp))
			/* block FW from writing to host */
			REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);

		/* wait until BRB is empty */
		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
		while (timer_count) {
			u32 prev_brb = tmp_reg;

			tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
			if (!tmp_reg)
				break;

			BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);

			/* reset timer as long as BRB actually gets emptied */
			if (prev_brb > tmp_reg)
				timer_count = 1000;
			else
				timer_count--;

			/* If UNDI resides in memory, manually increment it */
			if (prev_undi)
				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);

			udelay(10);
		}

		if (!timer_count)
			BNX2X_ERR("Failed to empty BRB, hope for the best\n");
	}

	/* No packets are in the pipeline, path is ready for reset */
	bnx2x_reset_common(bp);

	if (mac_vals.xmac_addr)
		REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
	if (mac_vals.umac_addr)
		REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
	if (mac_vals.emac_addr)
		REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
	if (mac_vals.bmac_addr) {
		REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
		REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
	}

	rc = bnx2x_prev_mark_path(bp, prev_undi);
	if (rc) {
		bnx2x_prev_mcp_done(bp);
		return rc;
	}

	return bnx2x_prev_mcp_done(bp);
}

/* A previous driver DMAE transaction may have occurred when the pre-boot
 * stage ended and boot began, or when a kdump kernel was loaded. Either
 * case would invalidate the addresses of the transaction, resulting in
 * the 'was error' bit being set in the PCI, causing all hw-to-host PCIe
 * transactions to time out. If this happened, we want to clear from
 * PGLUE_B both the interrupt that detected this and the 'was done' bit.
 */
static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp)) {
		u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
			DP(BNX2X_MSG_SP,
			   "'was error' bit was found to be set in pglueb upon startup.
Clearing\n"); 10121 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 10122 1 << BP_FUNC(bp)); 10123 } 10124 } 10125 } 10126 10127 static int bnx2x_prev_unload(struct bnx2x *bp) 10128 { 10129 int time_counter = 10; 10130 u32 rc, fw, hw_lock_reg, hw_lock_val; 10131 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 10132 10133 /* clear hw from errors which may have resulted from an interrupted 10134 * dmae transaction. 10135 */ 10136 bnx2x_prev_interrupted_dmae(bp); 10137 10138 /* Release previously held locks */ 10139 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 10140 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 10141 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 10142 10143 hw_lock_val = REG_RD(bp, hw_lock_reg); 10144 if (hw_lock_val) { 10145 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 10146 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); 10147 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 10148 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); 10149 } 10150 10151 BNX2X_DEV_INFO("Release Previously held hw lock\n"); 10152 REG_WR(bp, hw_lock_reg, 0xffffffff); 10153 } else 10154 BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); 10155 10156 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { 10157 BNX2X_DEV_INFO("Release previously held alr\n"); 10158 bnx2x_release_alr(bp); 10159 } 10160 10161 do { 10162 int aer = 0; 10163 /* Lock MCP using an unload request */ 10164 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 10165 if (!fw) { 10166 BNX2X_ERR("MCP response failure, aborting\n"); 10167 rc = -EBUSY; 10168 break; 10169 } 10170 10171 rc = down_interruptible(&bnx2x_prev_sem); 10172 if (rc) { 10173 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", 10174 rc); 10175 } else { 10176 /* If Path is marked by EEH, ignore unload status */ 10177 aer = !!(bnx2x_prev_path_get_entry(bp) && 10178 bnx2x_prev_path_get_entry(bp)->aer); 10179 up(&bnx2x_prev_sem); 10180 } 10181 10182 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { 10183 rc = bnx2x_prev_unload_common(bp); 10184 break; 10185 } 10186 10187 /* non-common reply from MCP might require looping */ 10188 rc = bnx2x_prev_unload_uncommon(bp); 10189 if (rc != BNX2X_PREV_WAIT_NEEDED) 10190 break; 10191 10192 msleep(20); 10193 } while (--time_counter); 10194 10195 if (!time_counter || rc) { 10196 BNX2X_ERR("Failed unloading previous driver, aborting\n"); 10197 rc = -EBUSY; 10198 } 10199 10200 /* Mark function if its port was used to boot from SAN */ 10201 if (bnx2x_port_after_undi(bp)) 10202 bp->link_params.feature_config_flags |= 10203 FEATURE_CONFIG_BOOT_FROM_SAN; 10204 10205 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 10206 10207 return rc; 10208 } 10209 10210 static void bnx2x_get_common_hwinfo(struct bnx2x *bp) 10211 { 10212 u32 val, val2, val3, val4, id, boot_mode; 10213 u16 pmc; 10214 10215 /* Get the chip revision id and number. 
*/ 10216 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 10217 val = REG_RD(bp, MISC_REG_CHIP_NUM); 10218 id = ((val & 0xffff) << 16); 10219 val = REG_RD(bp, MISC_REG_CHIP_REV); 10220 id |= ((val & 0xf) << 12); 10221 10222 /* Metal is read from PCI regs, but we can't access >=0x400 from 10223 * the configuration space (so we need to reg_rd) 10224 */ 10225 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); 10226 id |= (((val >> 24) & 0xf) << 4); 10227 val = REG_RD(bp, MISC_REG_BOND_ID); 10228 id |= (val & 0xf); 10229 bp->common.chip_id = id; 10230 10231 /* force 57811 according to MISC register */ 10232 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 10233 if (CHIP_IS_57810(bp)) 10234 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 10235 (bp->common.chip_id & 0x0000FFFF); 10236 else if (CHIP_IS_57810_MF(bp)) 10237 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 10238 (bp->common.chip_id & 0x0000FFFF); 10239 bp->common.chip_id |= 0x1; 10240 } 10241 10242 /* Set doorbell size */ 10243 bp->db_size = (1 << BNX2X_DB_SHIFT); 10244 10245 if (!CHIP_IS_E1x(bp)) { 10246 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 10247 if ((val & 1) == 0) 10248 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 10249 else 10250 val = (val >> 1) & 1; 10251 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 10252 "2_PORT_MODE"); 10253 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : 10254 CHIP_2_PORT_MODE; 10255 10256 if (CHIP_MODE_IS_4_PORT(bp)) 10257 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 10258 else 10259 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 10260 } else { 10261 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 10262 bp->pfid = bp->pf_num; /* 0..7 */ 10263 } 10264 10265 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); 10266 10267 bp->link_params.chip_id = bp->common.chip_id; 10268 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 10269 10270 val = (REG_RD(bp, 0x2874) & 0x55); 10271 if ((bp->common.chip_id & 0x1) || 10272 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 10273 bp->flags |= ONE_PORT_FLAG; 10274 BNX2X_DEV_INFO("single port device\n"); 10275 } 10276 10277 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 10278 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 10279 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 10280 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 10281 bp->common.flash_size, bp->common.flash_size); 10282 10283 bnx2x_init_shmem(bp); 10284 10285 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
10286 MISC_REG_GENERIC_CR_1 : 10287 MISC_REG_GENERIC_CR_0)); 10288 10289 bp->link_params.shmem_base = bp->common.shmem_base; 10290 bp->link_params.shmem2_base = bp->common.shmem2_base; 10291 if (SHMEM2_RD(bp, size) > 10292 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 10293 bp->link_params.lfa_base = 10294 REG_RD(bp, bp->common.shmem2_base + 10295 (u32)offsetof(struct shmem2_region, 10296 lfa_host_addr[BP_PORT(bp)])); 10297 else 10298 bp->link_params.lfa_base = 0; 10299 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 10300 bp->common.shmem_base, bp->common.shmem2_base); 10301 10302 if (!bp->common.shmem_base) { 10303 BNX2X_DEV_INFO("MCP not active\n"); 10304 bp->flags |= NO_MCP_FLAG; 10305 return; 10306 } 10307 10308 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 10309 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 10310 10311 bp->link_params.hw_led_mode = ((bp->common.hw_config & 10312 SHARED_HW_CFG_LED_MODE_MASK) >> 10313 SHARED_HW_CFG_LED_MODE_SHIFT); 10314 10315 bp->link_params.feature_config_flags = 0; 10316 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 10317 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 10318 bp->link_params.feature_config_flags |= 10319 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 10320 else 10321 bp->link_params.feature_config_flags &= 10322 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 10323 10324 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 10325 bp->common.bc_ver = val; 10326 BNX2X_DEV_INFO("bc_ver %X\n", val); 10327 if (val < BNX2X_BC_VER) { 10328 /* for now only warn 10329 * later we might need to enforce this */ 10330 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 10331 BNX2X_BC_VER, val); 10332 } 10333 bp->link_params.feature_config_flags |= 10334 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 10335 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 10336 10337 bp->link_params.feature_config_flags |= 10338 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 10339 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 10340 bp->link_params.feature_config_flags |= 10341 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 10342 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 10343 bp->link_params.feature_config_flags |= 10344 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 10345 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 10346 10347 bp->link_params.feature_config_flags |= 10348 (val >= REQ_BC_VER_4_MT_SUPPORTED) ? 10349 FEATURE_CONFIG_MT_SUPPORT : 0; 10350 10351 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 10352 BC_SUPPORTS_PFC_STATS : 0; 10353 10354 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? 10355 BC_SUPPORTS_FCOE_FEATURES : 0; 10356 10357 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 
		BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
	boot_mode = SHMEM_RD(bp,
			dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
		    PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
	switch (boot_mode) {
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
		break;
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
		break;
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
		break;
	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
		break;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}

#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int igu_sb_id;
	u32 val;
	u8 fid, igu_sb_cnt = 0;

	bp->igu_base_sb = 0xff;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		int vn = BP_VN(bp);
		igu_sb_cnt = bp->igu_sb_cnt;
		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return 0;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				igu_sb_cnt++;
			}
		}
	}

#ifdef CONFIG_PCI_MSI
	/* With the new PF resource allocation by MFW T7.4 and above, the
	 * number of CAM entries may not be equal to the value
	 * advertised in PCI.
10440 * Driver should use the minimal value of both as the actual status 10441 * block count 10442 */ 10443 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); 10444 #endif 10445 10446 if (igu_sb_cnt == 0) { 10447 BNX2X_ERR("CAM configuration error\n"); 10448 return -EINVAL; 10449 } 10450 10451 return 0; 10452 } 10453 10454 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) 10455 { 10456 int cfg_size = 0, idx, port = BP_PORT(bp); 10457 10458 /* Aggregation of supported attributes of all external phys */ 10459 bp->port.supported[0] = 0; 10460 bp->port.supported[1] = 0; 10461 switch (bp->link_params.num_phys) { 10462 case 1: 10463 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; 10464 cfg_size = 1; 10465 break; 10466 case 2: 10467 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; 10468 cfg_size = 1; 10469 break; 10470 case 3: 10471 if (bp->link_params.multi_phy_config & 10472 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 10473 bp->port.supported[1] = 10474 bp->link_params.phy[EXT_PHY1].supported; 10475 bp->port.supported[0] = 10476 bp->link_params.phy[EXT_PHY2].supported; 10477 } else { 10478 bp->port.supported[0] = 10479 bp->link_params.phy[EXT_PHY1].supported; 10480 bp->port.supported[1] = 10481 bp->link_params.phy[EXT_PHY2].supported; 10482 } 10483 cfg_size = 2; 10484 break; 10485 } 10486 10487 if (!(bp->port.supported[0] || bp->port.supported[1])) { 10488 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", 10489 SHMEM_RD(bp, 10490 dev_info.port_hw_config[port].external_phy_config), 10491 SHMEM_RD(bp, 10492 dev_info.port_hw_config[port].external_phy_config2)); 10493 return; 10494 } 10495 10496 if (CHIP_IS_E3(bp)) 10497 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 10498 else { 10499 switch (switch_cfg) { 10500 case SWITCH_CFG_1G: 10501 bp->port.phy_addr = REG_RD( 10502 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 10503 break; 10504 case SWITCH_CFG_10G: 10505 bp->port.phy_addr = REG_RD( 10506 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 10507 break; 10508 default: 10509 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 10510 bp->port.link_config[0]); 10511 return; 10512 } 10513 } 10514 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 10515 /* mask what we support according to speed_cap_mask per configuration */ 10516 for (idx = 0; idx < cfg_size; idx++) { 10517 if (!(bp->link_params.speed_cap_mask[idx] & 10518 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 10519 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 10520 10521 if (!(bp->link_params.speed_cap_mask[idx] & 10522 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 10523 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 10524 10525 if (!(bp->link_params.speed_cap_mask[idx] & 10526 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 10527 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 10528 10529 if (!(bp->link_params.speed_cap_mask[idx] & 10530 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 10531 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 10532 10533 if (!(bp->link_params.speed_cap_mask[idx] & 10534 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 10535 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 10536 SUPPORTED_1000baseT_Full); 10537 10538 if (!(bp->link_params.speed_cap_mask[idx] & 10539 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 10540 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 10541 10542 if (!(bp->link_params.speed_cap_mask[idx] & 10543 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 10544 bp->port.supported[idx] &= 
~SUPPORTED_10000baseT_Full; 10545 } 10546 10547 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 10548 bp->port.supported[1]); 10549 } 10550 10551 static void bnx2x_link_settings_requested(struct bnx2x *bp) 10552 { 10553 u32 link_config, idx, cfg_size = 0; 10554 bp->port.advertising[0] = 0; 10555 bp->port.advertising[1] = 0; 10556 switch (bp->link_params.num_phys) { 10557 case 1: 10558 case 2: 10559 cfg_size = 1; 10560 break; 10561 case 3: 10562 cfg_size = 2; 10563 break; 10564 } 10565 for (idx = 0; idx < cfg_size; idx++) { 10566 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 10567 link_config = bp->port.link_config[idx]; 10568 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 10569 case PORT_FEATURE_LINK_SPEED_AUTO: 10570 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 10571 bp->link_params.req_line_speed[idx] = 10572 SPEED_AUTO_NEG; 10573 bp->port.advertising[idx] |= 10574 bp->port.supported[idx]; 10575 if (bp->link_params.phy[EXT_PHY1].type == 10576 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 10577 bp->port.advertising[idx] |= 10578 (SUPPORTED_100baseT_Half | 10579 SUPPORTED_100baseT_Full); 10580 } else { 10581 /* force 10G, no AN */ 10582 bp->link_params.req_line_speed[idx] = 10583 SPEED_10000; 10584 bp->port.advertising[idx] |= 10585 (ADVERTISED_10000baseT_Full | 10586 ADVERTISED_FIBRE); 10587 continue; 10588 } 10589 break; 10590 10591 case PORT_FEATURE_LINK_SPEED_10M_FULL: 10592 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 10593 bp->link_params.req_line_speed[idx] = 10594 SPEED_10; 10595 bp->port.advertising[idx] |= 10596 (ADVERTISED_10baseT_Full | 10597 ADVERTISED_TP); 10598 } else { 10599 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10600 link_config, 10601 bp->link_params.speed_cap_mask[idx]); 10602 return; 10603 } 10604 break; 10605 10606 case PORT_FEATURE_LINK_SPEED_10M_HALF: 10607 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 10608 bp->link_params.req_line_speed[idx] = 10609 SPEED_10; 10610 bp->link_params.req_duplex[idx] = 10611 DUPLEX_HALF; 10612 bp->port.advertising[idx] |= 10613 (ADVERTISED_10baseT_Half | 10614 ADVERTISED_TP); 10615 } else { 10616 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10617 link_config, 10618 bp->link_params.speed_cap_mask[idx]); 10619 return; 10620 } 10621 break; 10622 10623 case PORT_FEATURE_LINK_SPEED_100M_FULL: 10624 if (bp->port.supported[idx] & 10625 SUPPORTED_100baseT_Full) { 10626 bp->link_params.req_line_speed[idx] = 10627 SPEED_100; 10628 bp->port.advertising[idx] |= 10629 (ADVERTISED_100baseT_Full | 10630 ADVERTISED_TP); 10631 } else { 10632 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10633 link_config, 10634 bp->link_params.speed_cap_mask[idx]); 10635 return; 10636 } 10637 break; 10638 10639 case PORT_FEATURE_LINK_SPEED_100M_HALF: 10640 if (bp->port.supported[idx] & 10641 SUPPORTED_100baseT_Half) { 10642 bp->link_params.req_line_speed[idx] = 10643 SPEED_100; 10644 bp->link_params.req_duplex[idx] = 10645 DUPLEX_HALF; 10646 bp->port.advertising[idx] |= 10647 (ADVERTISED_100baseT_Half | 10648 ADVERTISED_TP); 10649 } else { 10650 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10651 link_config, 10652 bp->link_params.speed_cap_mask[idx]); 10653 return; 10654 } 10655 break; 10656 10657 case PORT_FEATURE_LINK_SPEED_1G: 10658 if (bp->port.supported[idx] & 10659 SUPPORTED_1000baseT_Full) { 10660 bp->link_params.req_line_speed[idx] = 10661 SPEED_1000; 10662 bp->port.advertising[idx] |= 10663 (ADVERTISED_1000baseT_Full | 10664 ADVERTISED_TP); 10665 } else { 10666 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10667 link_config, 10668 bp->link_params.speed_cap_mask[idx]); 10669 return; 10670 } 10671 break; 10672 10673 case PORT_FEATURE_LINK_SPEED_2_5G: 10674 if (bp->port.supported[idx] & 10675 SUPPORTED_2500baseX_Full) { 10676 bp->link_params.req_line_speed[idx] = 10677 SPEED_2500; 10678 bp->port.advertising[idx] |= 10679 (ADVERTISED_2500baseX_Full | 10680 ADVERTISED_TP); 10681 } else { 10682 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10683 link_config, 10684 bp->link_params.speed_cap_mask[idx]); 10685 return; 10686 } 10687 break; 10688 10689 case PORT_FEATURE_LINK_SPEED_10G_CX4: 10690 if (bp->port.supported[idx] & 10691 SUPPORTED_10000baseT_Full) { 10692 bp->link_params.req_line_speed[idx] = 10693 SPEED_10000; 10694 bp->port.advertising[idx] |= 10695 (ADVERTISED_10000baseT_Full | 10696 ADVERTISED_FIBRE); 10697 } else { 10698 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10699 link_config, 10700 bp->link_params.speed_cap_mask[idx]); 10701 return; 10702 } 10703 break; 10704 case PORT_FEATURE_LINK_SPEED_20G: 10705 bp->link_params.req_line_speed[idx] = SPEED_20000; 10706 10707 break; 10708 default: 10709 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n", 10710 link_config); 10711 bp->link_params.req_line_speed[idx] = 10712 SPEED_AUTO_NEG; 10713 bp->port.advertising[idx] = 10714 bp->port.supported[idx]; 10715 break; 10716 } 10717 10718 bp->link_params.req_flow_ctrl[idx] = (link_config & 10719 PORT_FEATURE_FLOW_CONTROL_MASK); 10720 if (bp->link_params.req_flow_ctrl[idx] == 10721 BNX2X_FLOW_CTRL_AUTO) { 10722 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) 10723 bp->link_params.req_flow_ctrl[idx] = 10724 BNX2X_FLOW_CTRL_NONE; 10725 else 10726 bnx2x_set_requested_fc(bp); 10727 } 10728 10729 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 10730 bp->link_params.req_line_speed[idx], 10731 bp->link_params.req_duplex[idx], 10732 bp->link_params.req_flow_ctrl[idx], 10733 bp->port.advertising[idx]); 10734 } 10735 } 10736 10737 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 10738 { 10739 __be16 mac_hi_be = cpu_to_be16(mac_hi); 10740 __be32 mac_lo_be = cpu_to_be32(mac_lo); 10741 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); 10742 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); 10743 } 10744 10745 static void bnx2x_get_port_hwinfo(struct bnx2x *bp) 10746 { 10747 int port = BP_PORT(bp); 10748 u32 config; 10749 u32 ext_phy_type, ext_phy_config, eee_mode; 10750 10751 bp->link_params.bp = bp; 10752 bp->link_params.port = port; 10753 10754 bp->link_params.lane_config = 10755 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 10756 10757 bp->link_params.speed_cap_mask[0] = 10758 SHMEM_RD(bp, 10759 dev_info.port_hw_config[port].speed_capability_mask) & 10760 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 10761 bp->link_params.speed_cap_mask[1] = 10762 SHMEM_RD(bp, 10763 dev_info.port_hw_config[port].speed_capability_mask2) & 10764 
PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 10765 bp->port.link_config[0] = 10766 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 10767 10768 bp->port.link_config[1] = 10769 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); 10770 10771 bp->link_params.multi_phy_config = 10772 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 10773 /* If the device is capable of WoL, set the default state according 10774 * to the HW 10775 */ 10776 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 10777 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 10778 (config & PORT_FEATURE_WOL_ENABLED)); 10779 10780 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 10781 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) 10782 bp->flags |= NO_ISCSI_FLAG; 10783 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 10784 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) 10785 bp->flags |= NO_FCOE_FLAG; 10786 10787 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 10788 bp->link_params.lane_config, 10789 bp->link_params.speed_cap_mask[0], 10790 bp->port.link_config[0]); 10791 10792 bp->link_params.switch_cfg = (bp->port.link_config[0] & 10793 PORT_FEATURE_CONNECTED_SWITCH_MASK); 10794 bnx2x_phy_probe(&bp->link_params); 10795 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 10796 10797 bnx2x_link_settings_requested(bp); 10798 10799 /* 10800 * If connected directly, work with the internal PHY, otherwise, work 10801 * with the external PHY 10802 */ 10803 ext_phy_config = 10804 SHMEM_RD(bp, 10805 dev_info.port_hw_config[port].external_phy_config); 10806 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 10807 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 10808 bp->mdio.prtad = bp->port.phy_addr; 10809 10810 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 10811 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 10812 bp->mdio.prtad = 10813 XGXS_EXT_PHY_ADDR(ext_phy_config); 10814 10815 /* Configure link feature according to nvram value */ 10816 eee_mode = (((SHMEM_RD(bp, dev_info. 10817 port_feature_config[port].eee_power_mode)) & 10818 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 10819 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 10820 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 10821 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | 10822 EEE_MODE_ENABLE_LPI | 10823 EEE_MODE_OUTPUT_TIME; 10824 } else { 10825 bp->link_params.eee_mode = 0; 10826 } 10827 } 10828 10829 void bnx2x_get_iscsi_info(struct bnx2x *bp) 10830 { 10831 u32 no_flags = NO_ISCSI_FLAG; 10832 int port = BP_PORT(bp); 10833 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10834 drv_lic_key[port].max_iscsi_conn); 10835 10836 if (!CNIC_SUPPORT(bp)) { 10837 bp->flags |= no_flags; 10838 return; 10839 } 10840 10841 /* Get the number of maximum allowed iSCSI connections */ 10842 bp->cnic_eth_dev.max_iscsi_conn = 10843 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 10844 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; 10845 10846 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", 10847 bp->cnic_eth_dev.max_iscsi_conn); 10848 10849 /* 10850 * If maximum allowed number of connections is zero - 10851 * disable the feature. 
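 * (max_iscsi_conn is decoded from the license key read above, so a
 * zero here effectively means iSCSI offload is not licensed on this
 * port.)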
10852 */ 10853 if (!bp->cnic_eth_dev.max_iscsi_conn) 10854 bp->flags |= no_flags; 10855 } 10856 10857 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 10858 { 10859 /* Port info */ 10860 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10861 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); 10862 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10863 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); 10864 10865 /* Node info */ 10866 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10867 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); 10868 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10869 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 10870 } 10871 10872 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) 10873 { 10874 u8 count = 0; 10875 10876 if (IS_MF(bp)) { 10877 u8 fid; 10878 10879 /* iterate over absolute function ids for this path: */ 10880 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { 10881 if (IS_MF_SD(bp)) { 10882 u32 cfg = MF_CFG_RD(bp, 10883 func_mf_config[fid].config); 10884 10885 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && 10886 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == 10887 FUNC_MF_CFG_PROTOCOL_FCOE)) 10888 count++; 10889 } else { 10890 u32 cfg = MF_CFG_RD(bp, 10891 func_ext_config[fid]. 10892 func_cfg); 10893 10894 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && 10895 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) 10896 count++; 10897 } 10898 } 10899 } else { /* SF */ 10900 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1; 10901 10902 for (port = 0; port < port_cnt; port++) { 10903 u32 lic = SHMEM_RD(bp, 10904 drv_lic_key[port].max_fcoe_conn) ^ 10905 FW_ENCODE_32BIT_PATTERN; 10906 if (lic) 10907 count++; 10908 } 10909 } 10910 10911 return count; 10912 } 10913 10914 static void bnx2x_get_fcoe_info(struct bnx2x *bp) 10915 { 10916 int port = BP_PORT(bp); 10917 int func = BP_ABS_FUNC(bp); 10918 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10919 drv_lic_key[port].max_fcoe_conn); 10920 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); 10921 10922 if (!CNIC_SUPPORT(bp)) { 10923 bp->flags |= NO_FCOE_FLAG; 10924 return; 10925 } 10926 10927 /* Get the number of maximum allowed FCoE connections */ 10928 bp->cnic_eth_dev.max_fcoe_conn = 10929 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 10930 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 10931 10932 /* Calculate the number of maximum allowed FCoE tasks */ 10933 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; 10934 10935 /* check if FCoE resources must be shared between different functions */ 10936 if (num_fcoe_func) 10937 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; 10938 10939 /* Read the WWN: */ 10940 if (!IS_MF(bp)) { 10941 /* Port info */ 10942 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10943 SHMEM_RD(bp, 10944 dev_info.port_hw_config[port]. 10945 fcoe_wwn_port_name_upper); 10946 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10947 SHMEM_RD(bp, 10948 dev_info.port_hw_config[port]. 10949 fcoe_wwn_port_name_lower); 10950 10951 /* Node info */ 10952 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10953 SHMEM_RD(bp, 10954 dev_info.port_hw_config[port]. 10955 fcoe_wwn_node_name_upper); 10956 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10957 SHMEM_RD(bp, 10958 dev_info.port_hw_config[port]. 10959 fcoe_wwn_node_name_lower); 10960 } else if (!IS_MF_SD(bp)) { 10961 /* 10962 * Read the WWN info only if the FCoE feature is enabled for 10963 * this function. 
		 */
		if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
			bnx2x_get_ext_wwn_info(bp, func);

	} else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
		bnx2x_get_ext_wwn_info(bp, func);
	}

	BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);

	/*
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_fcoe_conn)
		bp->flags |= NO_FCOE_FLAG;
}

static void bnx2x_get_cnic_info(struct bnx2x *bp)
{
	/*
	 * iSCSI may be dynamically disabled, but by reading the
	 * info here we will decrease memory usage by the driver
	 * if the feature is disabled for good.
	 */
	bnx2x_get_iscsi_info(bp);
	bnx2x_get_fcoe_info(bp);
}

static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
	u8 *fip_mac = bp->fip_mac;

	if (IS_MF(bp)) {
		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
		 * FCoE MAC is missing, the corresponding feature should be
		 * disabled. In non-SD mode the feature configuration comes
		 * from struct func_ext_config.
		 */
		if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(iscsi_mac, val, val2);
				BNX2X_DEV_INFO
					("Read iSCSI MAC: %pM\n", iscsi_mac);
			} else {
				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
			}

			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 fcoe_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						fcoe_mac_addr_lower);
				bnx2x_set_mac_buf(fip_mac, val, val2);
				BNX2X_DEV_INFO
					("Read FCoE L2 MAC: %pM\n", fip_mac);
			} else {
				bp->flags |= NO_FCOE_FLAG;
			}

			bp->mf_ext_config = cfg;

		} else { /* SD MODE */
			if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
				/* use primary mac as iscsi mac */
				memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);

				BNX2X_DEV_INFO("SD ISCSI MODE\n");
				BNX2X_DEV_INFO
					("Read iSCSI MAC: %pM\n", iscsi_mac);
			} else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
				/* use primary mac as fip mac */
				memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
				BNX2X_DEV_INFO("SD FCoE MODE\n");
				BNX2X_DEV_INFO
					("Read FIP MAC: %pM\n", fip_mac);
			}
		}

		/* If this is a storage-only interface, use the SAN mac as
		 * the primary MAC. Notice that for SD this is already the
		 * case, as the SAN mac was copied from the primary MAC.
		 */
		if (IS_MF_FCOE_AFEX(bp))
			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
	} else {
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
		bnx2x_set_mac_buf(iscsi_mac, val, val2);

		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				fcoe_fip_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11068 fcoe_fip_mac_lower); 11069 bnx2x_set_mac_buf(fip_mac, val, val2); 11070 } 11071 11072 /* Disable iSCSI OOO if MAC configuration is invalid. */ 11073 if (!is_valid_ether_addr(iscsi_mac)) { 11074 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11075 memset(iscsi_mac, 0, ETH_ALEN); 11076 } 11077 11078 /* Disable FCoE if MAC configuration is invalid. */ 11079 if (!is_valid_ether_addr(fip_mac)) { 11080 bp->flags |= NO_FCOE_FLAG; 11081 memset(bp->fip_mac, 0, ETH_ALEN); 11082 } 11083 } 11084 11085 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) 11086 { 11087 u32 val, val2; 11088 int func = BP_ABS_FUNC(bp); 11089 int port = BP_PORT(bp); 11090 11091 /* Zero primary MAC configuration */ 11092 memset(bp->dev->dev_addr, 0, ETH_ALEN); 11093 11094 if (BP_NOMCP(bp)) { 11095 BNX2X_ERROR("warning: random MAC workaround active\n"); 11096 eth_hw_addr_random(bp->dev); 11097 } else if (IS_MF(bp)) { 11098 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 11099 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); 11100 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 11101 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) 11102 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11103 11104 if (CNIC_SUPPORT(bp)) 11105 bnx2x_get_cnic_mac_hwinfo(bp); 11106 } else { 11107 /* in SF read MACs from port configuration */ 11108 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11109 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11110 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11111 11112 if (CNIC_SUPPORT(bp)) 11113 bnx2x_get_cnic_mac_hwinfo(bp); 11114 } 11115 11116 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 11117 11118 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) 11119 dev_err(&bp->pdev->dev, 11120 "bad Ethernet MAC address configuration: %pM\n" 11121 "change it manually before bringing up the appropriate network interface\n", 11122 bp->dev->dev_addr); 11123 } 11124 11125 static bool bnx2x_get_dropless_info(struct bnx2x *bp) 11126 { 11127 int tmp; 11128 u32 cfg; 11129 11130 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { 11131 /* Take function: tmp = func */ 11132 tmp = BP_ABS_FUNC(bp); 11133 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); 11134 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING); 11135 } else { 11136 /* Take port: tmp = port */ 11137 tmp = BP_PORT(bp); 11138 cfg = SHMEM_RD(bp, 11139 dev_info.port_hw_config[tmp].generic_features); 11140 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED); 11141 } 11142 return cfg; 11143 } 11144 11145 static int bnx2x_get_hwinfo(struct bnx2x *bp) 11146 { 11147 int /*abs*/func = BP_ABS_FUNC(bp); 11148 int vn; 11149 u32 val = 0; 11150 int rc = 0; 11151 11152 bnx2x_get_common_hwinfo(bp); 11153 11154 /* 11155 * initialize IGU parameters 11156 */ 11157 if (CHIP_IS_E1x(bp)) { 11158 bp->common.int_block = INT_BLOCK_HC; 11159 11160 bp->igu_dsb_id = DEF_SB_IGU_ID; 11161 bp->igu_base_sb = 0; 11162 } else { 11163 bp->common.int_block = INT_BLOCK_IGU; 11164 11165 /* do not allow device reset during IGU info processing */ 11166 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11167 11168 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 11169 11170 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 11171 int tout = 5000; 11172 11173 BNX2X_DEV_INFO("FORCING Normal Mode\n"); 11174 11175 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 11176 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); 11177 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); 11178 11179 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 11180 tout--; 11181 
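				/* 1-2ms per poll; tout = 5000 caps the
				 * wait at roughly 5-10 seconds total
				 */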
static bool bnx2x_get_dropless_info(struct bnx2x *bp)
{
	int tmp;
	u32 cfg;

	if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
		/* Take function: tmp = func */
		tmp = BP_ABS_FUNC(bp);
		cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
		cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
	} else {
		/* Take port: tmp = port */
		tmp = BP_PORT(bp);
		cfg = SHMEM_RD(bp,
			       dev_info.port_hw_config[tmp].generic_features);
		cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
	}
	return cfg;
}

static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	/*
	 * initialize IGU parameters
	 */
	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
	} else {
		bp->common.int_block = INT_BLOCK_IGU;

		/* do not allow device reset during IGU info processing */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			int tout = 5000;

			BNX2X_DEV_INFO("FORCING Normal Mode\n");

			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);

			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				tout--;
				usleep_range(1000, 2000);
			}

			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				dev_err(&bp->pdev->dev,
					"FORCING Normal Mode failed!!!\n");
				bnx2x_release_hw_lock(bp,
						      HW_LOCK_RESOURCE_RESET);
				return -EPERM;
			}
		}

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			BNX2X_DEV_INFO("IGU Normal Mode\n");

		rc = bnx2x_get_igu_cam_info(bp);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
		if (rc)
			return rc;
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
	else /*
	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
	      * the same queue are indicated on the same IGU SB). So we prefer
	      * FW and IGU SBs to be the same value.
	      */
		bp->base_fw_ndsb = bp->igu_base_sb;

	BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
		       bp->igu_sb_cnt, bp->base_fw_ndsb);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_VN(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
			       (u32)offsetof(struct shmem2_region,
					     mf_cfg_addr));

		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. Existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes) */
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
					BNX2X_DEV_INFO("illegal MAC address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
				if ((!CHIP_IS_E1x(bp)) &&
				    (MF_CFG_RD(bp, func_mf_config[func].
					       mac_upper) != 0xffff) &&
				    (SHMEM2_HAS(bp,
						afex_driver_support))) {
					bp->mf_mode = MULTI_FUNCTION_AFEX;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else {
					BNX2X_DEV_INFO("can not configure afex mode\n");
				}
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
						func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						    func_mf_config[func].config);
				} else
					BNX2X_DEV_INFO("illegal OV for SD\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
				bp->mf_config[vn] = 0;
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				bp->path_has_ovlan = true;

				BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				dev_err(&bp->pdev->dev,
					"No valid MF OV for func %d, aborting\n",
					func);
				return -EPERM;
			}
			break;
		case MULTI_FUNCTION_AFEX:
			BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
				       func);
			break;
		default:
			if (vn) {
				dev_err(&bp->pdev->dev,
					"VN %d is in a single function mode, aborting\n",
					vn);
				return -EPERM;
			}
			break;
		}

		/* Check if the other port on the path needs OVLAN:
		 * since the MF configuration is shared between ports, the
		 * only possible mixed modes are
		 * {SF, SI}, {SF, SD}, {SD, SF}, {SI, SF}
		 */
		if (CHIP_MODE_IS_4_PORT(bp) &&
		    !bp->path_has_ovlan &&
		    !IS_MF(bp) &&
		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			u8 other_port = !BP_PORT(bp);
			u8 other_func = BP_PATH(bp) + 2*other_port;
			val = MF_CFG_RD(bp,
					func_mf_config[other_func].e1hov_tag);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
				bp->path_has_ovlan = true;
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/* port info */
	bnx2x_get_port_hwinfo(bp);

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

	bnx2x_get_cnic_info(bp);

	return rc;
}

static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_start[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	char *vpd_data;
	char *vpd_extended_data = NULL;
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* The VPD RO tag should be the first tag after the identifier
	 * string, hence we should be able to find it in the first
	 * BNX2X_VPD_LEN chars
	 */
	i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_start[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN) {
		vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
		if (vpd_extended_data == NULL)
			goto out_not_found;

		/* read rest of vpd image into vpd_extended_data */
		memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
		cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
				   block_end - BNX2X_VPD_LEN,
				   vpd_extended_data + BNX2X_VPD_LEN);
		if (cnt < (block_end - BNX2X_VPD_LEN))
			goto out_not_found;
		vpd_data = vpd_extended_data;
	} else
		vpd_data = vpd_start;

	/* now vpd_data holds full vpd content in both cases */

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		kfree(vpd_extended_data);
		return;
	}
out_not_found:
	kfree(vpd_extended_data);
	return;
}
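/* For reference, a worked example of the VPD layout bnx2x_read_fwinfo()
 * above walks; the byte values are made up, not real VPD contents:
 *
 *	0x90 0x0c 0x00		large-resource tag: RO data, length 0x000c
 *	'M' 'N' 0x04 "1028"	MFR_ID keyword, 4-byte field (Dell's id)
 *	'V' '0' 0x03 "1.0"	VENDOR0 keyword, 3-byte fw version text
 *
 * pci_vpd_find_tag() returns the offset of the 0x90 tag byte,
 * pci_vpd_lrdt_size() decodes the two little-endian length bytes that
 * follow it, and each keyword field consists of PCI_VPD_INFO_FLD_HDR_SIZE
 * (3) header bytes followed by pci_vpd_info_field_size() payload bytes.
 */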
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
{
	u32 flags = 0;

	if (CHIP_REV_IS_FPGA(bp))
		SET_FLAGS(flags, MODE_FPGA);
	else if (CHIP_REV_IS_EMUL(bp))
		SET_FLAGS(flags, MODE_EMUL);
	else
		SET_FLAGS(flags, MODE_ASIC);

	if (CHIP_MODE_IS_4_PORT(bp))
		SET_FLAGS(flags, MODE_PORT4);
	else
		SET_FLAGS(flags, MODE_PORT2);

	if (CHIP_IS_E2(bp))
		SET_FLAGS(flags, MODE_E2);
	else if (CHIP_IS_E3(bp)) {
		SET_FLAGS(flags, MODE_E3);
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			SET_FLAGS(flags, MODE_E3_A0);
		else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
			SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
	}

	if (IS_MF(bp)) {
		SET_FLAGS(flags, MODE_MF);
		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			SET_FLAGS(flags, MODE_MF_SD);
			break;
		case MULTI_FUNCTION_SI:
			SET_FLAGS(flags, MODE_MF_SI);
			break;
		case MULTI_FUNCTION_AFEX:
			SET_FLAGS(flags, MODE_MF_AFEX);
			break;
		}
	} else
		SET_FLAGS(flags, MODE_SF);

#if defined(__LITTLE_ENDIAN)
	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
#else /*(__BIG_ENDIAN)*/
	SET_FLAGS(flags, MODE_BIG_ENDIAN);
#endif
	INIT_MODE_FLAGS(bp) = flags;
}
static int bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int rc;

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
	if (IS_PF(bp)) {
		rc = bnx2x_get_hwinfo(bp);
		if (rc)
			return rc;
	} else {
		eth_zero_addr(bp->dev->dev_addr);
	}

	bnx2x_set_modes_bitmap(bp);

	rc = bnx2x_alloc_mem_bp(bp);
	if (rc)
		return rc;

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* init fw_seq */
		bp->fw_seq =
			SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
							DRV_MSG_SEQ_NUMBER_MASK;
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

		bnx2x_prev_unload(bp);
	}

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");

	bp->disable_tpa = disable_tpa;
	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);

	/* Set TPA flags */
	if (bp->disable_tpa) {
		bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);

	bp->mrrs = mrrs;

	bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
	if (IS_VF(bp))
		bp->rx_ring_size = MAX_RX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
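	/* Worked example of the granularity rounding above, assuming
	 * BNX2X_BTR is 4 as defined in bnx2x.h: tx_ticks = (50 / 4) * 4 = 48
	 * and rx_ticks = (25 / 4) * 4 = 24, i.e. the requested values are
	 * rounded down to a multiple of the BTR unit.
	 */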
	bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
	    SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
		bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
		bnx2x_dcbx_init_params(bp);
	} else {
		bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
	}

	if (CHIP_IS_E1x(bp))
		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
	else
		bp->cnic_base_cl_id = FP_SB_MAX_E2;

	/* multiple tx priority */
	if (IS_VF(bp))
		bp->max_cos = 1;
	else if (CHIP_IS_E1x(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
	else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
	else if (CHIP_IS_E3B0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
	else
		BNX2X_ERR("unknown chip %x revision %x\n",
			  CHIP_NUM(bp), CHIP_REV(bp));
	BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);

	/* We need at least one default status block for slow-path events,
	 * a second status block for the L2 queue, and a third status block
	 * for CNIC if supported.
	 */
	if (CNIC_SUPPORT(bp))
		bp->min_msix_vec_cnt = 3;
	else
		bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);

	return rc;
}

/****************************************************************************
 * General service functions
 ****************************************************************************/

/*
 * net_device service functions
 */

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	bool global = false;
	int other_engine = BP_PATH(bp) ? 0 : 1;
	bool other_load_status, load_status;
	int rc;

	bp->stats_init = true;

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	/* If a parity error happened during the unload, then attentions
	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
	 * want the first function loaded on the current engine to
	 * complete the recovery.
	 * Parity recovery is only relevant for the PF driver.
	 */
	if (IS_PF(bp)) {
		other_load_status = bnx2x_get_load_status(bp, other_engine);
		load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
		if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
		    bnx2x_chk_parity_attn(bp, &global, true)) {
			do {
				/* If there are attentions and they are in
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless of whether it will be this
				 * function that will complete the recovery
				 * or not.
				 */
				if (global)
					bnx2x_set_reset_global(bp);

				/* Only the first function on the current
				 * engine should try to recover in open. In
				 * case of attentions in global blocks only
				 * the first in the chip should try to
				 * recover.
				 */
				if ((!load_status &&
				     (!global || !other_load_status)) &&
				    bnx2x_trylock_leader_lock(bp) &&
				    !bnx2x_leader_reset(bp)) {
					netdev_info(bp->dev,
						    "Recovered in open\n");
					break;
				}

				/* recovery has failed... */
				bnx2x_set_power_state(bp, PCI_D3hot);
				bp->recovery_state = BNX2X_RECOVERY_FAILED;

				BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
					  "If you still see this message after a few retries then power cycle is required.\n");

				return -EAGAIN;
			} while (0);
		}
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	if (rc)
		return rc;
	return bnx2x_open_epilog(bp);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	return 0;
}

static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p)
{
	int mc_count = netdev_mc_count(bp->dev);
	struct bnx2x_mcast_list_elem *mc_mac =
		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
	struct netdev_hw_addr *ha;

	if (!mc_mac)
		return -ENOMEM;

	INIT_LIST_HEAD(&p->mcast_list);

	netdev_for_each_mc_addr(ha, bp->dev) {
		mc_mac->mac = bnx2x_mc_addr(ha);
		list_add_tail(&mc_mac->link, &p->mcast_list);
		mc_mac++;
	}

	p->mcast_list_len = mc_count;

	return 0;
}

static void bnx2x_free_mcast_macs_list(
	struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_mcast_list_elem *mc_mac =
		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
				 link);

	WARN_ON(!mc_mac);
	kfree(mc_mac);
}
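/* Note on the alloc/free pairing above: bnx2x_init_mcast_macs_list()
 * allocates all mc_count elements as a single kzalloc() array, so
 * bnx2x_free_mcast_macs_list() only needs to kfree() the first list entry,
 * which is the base pointer of that array. A minimal sketch of the same
 * pattern with made-up types and names:
 */
#if 0
struct example_elem {
	struct list_head link;
	int payload;
};

static int example_build_list(struct list_head *head, int n)
{
	struct example_elem *arr = kzalloc(sizeof(*arr) * n, GFP_ATOMIC);
	int i;

	if (!arr)
		return -ENOMEM;
	INIT_LIST_HEAD(head);
	for (i = 0; i < n; i++)
		list_add_tail(&arr[i].link, head);
	return 0;
}

static void example_free_list(struct list_head *head)
{
	/* the first entry is the start of the kzalloc'ed array */
	kfree(list_first_entry(head, struct example_elem, link));
}
#endif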
/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a MAC type for these MACs.
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int rc;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
	unsigned long ramrod_flags = 0;

	/* First schedule a cleanup of the old configuration */
	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
	if (rc < 0) {
		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
		return rc;
	}

	netdev_for_each_uc_addr(ha, dev) {
		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
				       BNX2X_UC_LIST_MAC, &ramrod_flags);
		if (rc == -EEXIST) {
			DP(BNX2X_MSG_SP,
			   "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding the same MAC as an error */
			rc = 0;
		} else if (rc < 0) {
			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
				  rc);
			return rc;
		}
	}

	/* Execute the pending commands */
	__set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
				 BNX2X_UC_LIST_MAC, &ramrod_flags);
}

static int bnx2x_set_mc_list(struct bnx2x *bp)
{
	struct net_device *dev = bp->dev;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc = 0;

	rparam.mcast_obj = &bp->mcast_obj;

	/* first, clear all configured multicast MACs */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0) {
		BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
		return rc;
	}

	/* then, configure a new MACs list */
	if (netdev_mc_count(dev)) {
		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
		if (rc) {
			BNX2X_ERR("Failed to create multicast MACs list: %d\n",
				  rc);
			return rc;
		}

		/* Now add the new MACs */
		rc = bnx2x_config_mcast(bp, &rparam,
					BNX2X_MCAST_CMD_ADD);
		if (rc < 0)
			BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
				  rc);

		bnx2x_free_mcast_macs_list(&rparam);
	}

	return rc;
}

/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else {
		if (IS_PF(bp)) {
			/* some multicasts */
			if (bnx2x_set_mc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_ALLMULTI;

			if (bnx2x_set_uc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_PROMISC;
		} else {
			/* Configuring multicast for a VF involves sleeping
			 * (while we wait for the PF's response). Since this
			 * function is called from a non-sleepable context we
			 * must schedule a work item for this purpose.
			 */
			smp_mb__before_clear_bit();
			set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
				&bp->sp_rtnl_state);
			smp_mb__after_clear_bit();
			schedule_delayed_work(&bp->sp_rtnl_task, 0);
		}
	}

	bp->rx_mode = rx_mode;
	/* handle ISCSI SD mode */
	if (IS_MF_ISCSI_SD(bp))
		bp->rx_mode = BNX2X_RX_MODE_NONE;

	/* Schedule the rx_mode command */
	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
		return;
	}

	if (IS_PF(bp)) {
		bnx2x_set_storm_rx_mode(bp);
	} else {
		/* Configuring the rx mode to the storms in a VF involves
		 * sleeping (while we wait for the PF's response). Since this
		 * function is called from a non-sleepable context we must
		 * schedule a work item for this purpose.
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK,
	   "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
	   prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
	}
}
#endif

static int bnx2x_validate_addr(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* query the bulletin board for the MAC address configured by the PF */
	if (IS_VF(bp))
		bnx2x_sample_bulletin(bp);

	if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
		BNX2X_ERR("Non-valid Ethernet address\n");
		return -EADDRNOTAVAIL;
	}
	return 0;
}

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= bnx2x_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_fix_features	= bnx2x_fix_features,
	.ndo_set_features	= bnx2x_set_features,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
	.ndo_setup_tc		= bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
	.ndo_get_vf_config	= bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
#endif
};

static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
	struct device *dev = &bp->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "System does not support DMA, aborting\n");
		return -EIO;
	}

	return 0;
}
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
			  struct net_device *dev, unsigned long board_type)
{
	int rc;
	u32 pci_cfg_dword;
	bool chip_is_e1x = (board_type == BCM57710 ||
			    board_type == BCM57711 ||
			    board_type == BCM57711E);

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp->dev = dev;
	bp->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
	if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
	    PCICFG_REVESION_ID_ERROR_VAL) {
		pr_err("PCI device error, probably due to fan failure, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	if (IS_PF(bp)) {
		bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
		if (bp->pm_cap == 0) {
			dev_err(&bp->pdev->dev,
				"Cannot find power management capability, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}
	}

	if (!pci_is_pcie(pdev)) {
		dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	rc = bnx2x_set_coherency_mask(bp);
	if (rc)
		goto err_out_release;

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* In E1/E1H use the PCI device function as given by the kernel.
	 * In E2/E3 read the physical function from the ME register, since
	 * these chips support Physical Device Assignment where the kernel
	 * BDF may be arbitrary (depending on the hypervisor).
	 */
	if (chip_is_e1x) {
		bp->pf_num = PCI_FUNC(pdev->devfn);
	} else {
		/* chip is E2/E3 */
		pci_read_config_dword(bp->pdev,
				      PCICFG_ME_REGISTER, &pci_cfg_dword);
		bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
				  ME_REG_ABS_PF_NUM_SHIFT);
	}
	BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	/*
	 * Clean the following indirect addresses for all functions since
	 * they are not used by the driver.
	 */
	if (IS_PF(bp)) {
		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);

		if (chip_is_e1x) {
			REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
		}

		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up.
		 */
		if (!chip_is_e1x)
			REG_WR(bp,
			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
	}

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(bp, dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
	if (!CHIP_IS_E1x(bp)) {
		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
		dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
	}

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
				       enum bnx2x_pci_bus_speed *speed)
{
	u32 link_speed, val = 0;

	pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;

	switch (link_speed) {
	case 3:
		*speed = BNX2X_PCI_LINK_SPEED_8000;
		break;
	case 2:
		*speed = BNX2X_PCI_LINK_SPEED_5000;
		break;
	default:
		*speed = BNX2X_PCI_LINK_SPEED_2500;
	}
}
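/* Worked example for the decode above: if the link-control dword yields a
 * width field of 8 and a speed field of 2, the device reports PCIe x8 at
 * 5.0 GT/s (BNX2X_PCI_LINK_SPEED_5000). Encoding 3 maps to 8.0 GT/s, and
 * any other value (including the 2.5 GT/s encoding of 1) falls through to
 * BNX2X_PCI_LINK_SPEED_2500.
 */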
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	__be16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
		BNX2X_ERR("Wrong FW size\n");
		return -EINVAL;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data
	 */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			BNX2X_ERR("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (__force __be16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			BNX2X_ERR("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			  fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			  BCM_5710_FW_MAJOR_VERSION,
			  BCM_5710_FW_MINOR_VERSION,
			  BCM_5710_FW_REVISION_VERSION,
			  BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}
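/* Worked example for bnx2x_prep_iro() above: each IRO record is consumed
 * as three big-endian 32-bit words, e.g. (made-up values)
 *
 *	0x00012345 0x00100020 0x00300008
 *
 * unpacks to base = 0x12345, m1 = 0x0010, m2 = 0x0020, m3 = 0x0030 and
 * size = 0x0008: the high halfword of the second word is m1 and its low
 * halfword m2, while the third word splits the same way into m3 and size.
 */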
static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr)							\
		goto lbl;						\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
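/* For reference, a sketch of what the macro above expands to for the
 * init_data array (modulo whitespace); note that it relies on fw_hdr and
 * bp being in scope at the call site:
 */
#if 0
do {
	u32 len = be32_to_cpu(fw_hdr->init_data.len);
	bp->init_data = kmalloc(len, GFP_KERNEL);
	if (!bp->init_data)
		goto request_firmware_exit;
	be32_to_cpu_n(bp->firmware->data +
		      be32_to_cpu(fw_hdr->init_data.offset),
		      (u8 *)bp->init_data, len);
} while (0);
#endif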
static int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (bp->firmware)
		return 0;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (!CHIP_IS_E1x(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}
	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);
	bp->firmware = NULL;

	return rc;
}

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
	bp->firmware = NULL;
}

static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn      = bnx2x_init_hw_common,
	.init_hw_port     = bnx2x_init_hw_port,
	.init_hw_func     = bnx2x_init_hw_func,

	.reset_hw_cmn     = bnx2x_reset_common,
	.reset_hw_port    = bnx2x_reset_port,
	.reset_hw_func    = bnx2x_reset_func,

	.gunzip_init      = bnx2x_gunzip_init,
	.gunzip_end       = bnx2x_gunzip_end,

	.init_fw          = bnx2x_init_firmware,
	.release_fw       = bnx2x_release_firmware,
};

void bnx2x__init_func_obj(struct bnx2x *bp)
{
	/* Prepare DMAE related driver resources */
	bnx2x_setup_dmae(bp);

	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    bnx2x_sp(bp, func_afex_rdata),
			    bnx2x_sp_mapping(bp, func_afex_rdata),
			    &bnx2x_func_sp_drv);
}

/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
	int cid_count = BNX2X_L2_MAX_CID(bp);

	if (IS_SRIOV(bp))
		cid_count += BNX2X_VF_CIDS;

	if (CNIC_SUPPORT(bp))
		cid_count += CNIC_CID_MAX;

	return roundup(cid_count, QM_CID_ROUND);
}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev: pci device
 * @cnic_cnt: number of status blocks reserved for CNIC
 * @is_vf: true for a VF, which has no default SB
 */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
				     int cnic_cnt, bool is_vf)
{
	int pos, index;
	u16 control = 0;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

	/*
	 * If MSI-X is not supported - return the number of SBs needed to
	 * support one fast path queue: one FP queue + SB for CNIC
	 */
	if (!pos) {
		dev_info(&pdev->dev, "no msix capability found\n");
		return 1 + cnic_cnt;
	}
	dev_info(&pdev->dev, "msix capability found\n");

	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: the number of
	 * all SBs without the default SB.
	 * For VFs there is no default SB, so we return (index + 1).
	 */
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);

	index = control & PCI_MSIX_FLAGS_QSIZE;

	return is_vf ? index + 1 : index;
}
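/* Worked example for the function above: a PF whose MSI-X Message Control
 * word reports QSIZE = 17 has an 18-entry vector table; excluding the
 * default SB leaves 17 non-default SBs, which is the index value itself.
 * A VF has no default SB, so the same reading yields index + 1 = 18
 * usable SBs.
 */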
static int set_max_cos_est(int chip_id)
{
	switch (chip_id) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		return BNX2X_MULTI_TX_COS_E1X;
	case BCM57712:
	case BCM57712_MF:
	case BCM57712_VF:
		return BNX2X_MULTI_TX_COS_E2_E3A0;
	case BCM57800:
	case BCM57800_MF:
	case BCM57800_VF:
	case BCM57810:
	case BCM57810_MF:
	case BCM57840_4_10:
	case BCM57840_2_20:
	case BCM57840_O:
	case BCM57840_MFO:
	case BCM57810_VF:
	case BCM57840_MF:
	case BCM57840_VF:
	case BCM57811:
	case BCM57811_MF:
	case BCM57811_VF:
		return BNX2X_MULTI_TX_COS_E3B0;
	default:
		pr_err("Unknown board_type (%d), aborting\n", chip_id);
		return -ENODEV;
	}
}

static int set_is_vf(int chip_id)
{
	switch (chip_id) {
	case BCM57712_VF:
	case BCM57800_VF:
	case BCM57810_VF:
	case BCM57840_VF:
	case BCM57811_VF:
		return true;
	default:
		return false;
	}
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);

static int bnx2x_init_one(struct pci_dev *pdev,
			  const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width;
	enum bnx2x_pci_bus_speed pcie_speed;
	int rc, max_non_def_sbs;
	int rx_count, tx_count, rss_count, doorbell_size;
	int max_cos_est;
	bool is_vf;
	int cnic_cnt;

	/* An estimated maximum supported CoS number according to the chip
	 * version.
	 * We will try to roughly estimate the maximum number of CoSes this
	 * chip may support in order to minimize the memory allocated for Tx
	 * netdev_queue's. This number will be accurately calculated during
	 * the initialization of bp->max_cos based on the chip version AND
	 * chip revision in bnx2x_init_bp().
	 */
	max_cos_est = set_max_cos_est(ent->driver_data);
	if (max_cos_est < 0)
		return max_cos_est;
	is_vf = set_is_vf(ent->driver_data);
	cnic_cnt = is_vf ? 0 : 1;

	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);

	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
	rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;

	if (rss_count < 1)
		return -EINVAL;

	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
	rx_count = rss_count + cnic_cnt;

	/* Maximum number of netdev Tx queues:
	 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
	 */
	tx_count = rss_count * max_cos_est + cnic_cnt;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	bp->flags = 0;
	if (is_vf)
		bp->flags |= IS_VF_FLAG;

	bp->igu_sb_cnt = max_non_def_sbs;
	bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
	bp->msg_enable = debug;
	bp->cnic_support = cnic_cnt;
	bp->cnic_probe = bnx2x_cnic_probe;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	BNX2X_DEV_INFO("This is a %s function\n",
		       IS_PF(bp) ? "physical" : "virtual");
	BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
	BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
		       tx_count, rx_count);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * l2 connections.
	 */
	if (IS_VF(bp)) {
		bp->doorbells = bnx2x_vf_doorbells(bp);
		rc = bnx2x_vf_pci_alloc(bp);
		if (rc)
			goto init_one_exit;
	} else {
		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
		if (doorbell_size > pci_resource_len(pdev, 2)) {
			dev_err(&bp->pdev->dev,
				"Cannot map doorbells, bar size too small, aborting\n");
			rc = -ENOMEM;
			goto init_one_exit;
		}
		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
						doorbell_size);
	}
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto init_one_exit;
	}

	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
		if (rc)
			goto init_one_exit;
	}

	/* Enable SRIOV if capability found in configuration space */
	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
	BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);

	/* disable FCoE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

	/* Set bp->num_queues for MSI-X mode */
	bnx2x_set_num_queues(bp);

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */
	rc = bnx2x_set_int_mode(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot set interrupts\n");
		goto init_one_exit;
	}
	BNX2X_DEV_INFO("set interrupts successfully\n");

	/* register the net device */
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}
	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);

	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
		       pcie_width, pcie_speed);

	BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		       board_info[ent->driver_data].name,
		       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		       pcie_width,
		       pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
		       pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
		       pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
		       "Unknown",
		       dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (IS_PF(bp) && bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __bnx2x_remove(struct pci_dev *pdev,
			   struct net_device *dev,
			   struct bnx2x *bp,
			   bool remove_netdev)
{
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	/* Close the interface - either directly or implicitly */
	if (remove_netdev) {
		unregister_netdev(dev);
	} else {
		rtnl_lock();
		if (netif_running(dev))
			bnx2x_close(dev);
		rtnl_unlock();
	}

	/* Power on: we can't let the PCI layer write to us while we are in D3 */
	if (IS_PF(bp))
		bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	if (IS_PF(bp))
		bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->sp_rtnl_task);

	bnx2x_iov_remove_one(bp);

	/* send message via vfpf channel to release the resources of this vf */
	if (IS_VF(bp))
		bnx2x_vfpf_release(bp);

	/* Assumes no further PCIe PM changes will occur */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	if (bp->regview)
		iounmap(bp->regview);

	/* For a VF the doorbells are part of the regview and were unmapped
	 * along with it. FW is only loaded by the PF.
	 */
	if (IS_PF(bp)) {
		if (bp->doorbells)
			iounmap(bp->doorbells);

		bnx2x_release_firmware(bp);
	}
	bnx2x_free_mem_bp(bp);

	if (remove_netdev)
		free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static void bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	__bnx2x_remove(pdev, dev, bp, true);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);
	if (CNIC_LOADED(bp))
		bnx2x_del_all_napi_cnic(bp);
	netdev_reset_tc(bp->dev);

	del_timer_sync(&bp->timer);
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);

	spin_lock_bh(&bp->stats_lock);
	bp->stats_state = STATS_STATE_DISABLED;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_save_statistics(bp);

	netif_carrier_off(bp->dev);

	return 0;
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	BNX2X_ERR("IO error detected\n");

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	bnx2x_prev_path_mark_eeh(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	rtnl_lock();
	BNX2X_ERR("IO slot reset initializing...\n");
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	if (netif_running(dev)) {
		BNX2X_ERR("IO slot reset --> driver unload\n");

		/* MCP should have been reset; Need to wait for validity */
		bnx2x_init_shmem(bp);

		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
			u32 v;

			v = SHMEM2_RD(bp,
				      drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
			SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
				  v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
		}
		bnx2x_drain_tx_queues(bp);
		bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
		bnx2x_netif_stop(bp, 1);
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, true);

		bp->sp_state = 0;
		bp->port.pmf = 0;

		bnx2x_prev_unload(bp);

		/* We should have reset the engine, so it's fair to assume
		 * the FW will no longer write to the bnx2x driver.
		 */
		bnx2x_squeeze_objects(bp);
		bnx2x_free_skbs(bp);
		for_each_rx_queue(bp, i)
			bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
		bnx2x_free_fp_mem(bp);
		bnx2x_free_mem(bp);

		bp->state = BNX2X_STATE_CLOSED;
	}

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
							DRV_MSG_SEQ_NUMBER_MASK;

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static void bnx2x_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	netif_device_detach(dev);
	rtnl_unlock();

	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * rootfs is mounted from SAN.
	 */
	__bnx2x_remove(pdev, dev, bp, false);
}
static void bnx2x_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	netif_device_detach(dev);
	rtnl_unlock();

	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * rootfs is mounted from SAN.
	 */
	__bnx2x_remove(pdev, dev, bp, false);
}

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = bnx2x_remove_one,
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
#ifdef CONFIG_BNX2X_SRIOV
	.sriov_configure = bnx2x_sriov_configure,
#endif
	.shutdown    = bnx2x_shutdown,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	struct list_head *pos, *q;

	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);

	/* Free globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
		struct bnx2x_prev_path_list *tmp =
			list_entry(pos, struct bnx2x_prev_path_list, list);
		list_del(pos);
		kfree(tmp);
	}
}

void bnx2x_notify_link_changed(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}
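
/* CNIC slow-path posting model (descriptive note added for clarity): L5
 * drivers hand 16-byte kernel work-queue entries (KWQEs) to
 * bnx2x_cnic_sp_queue(), which stages them on a software ring
 * (bp->cnic_kwq).  bnx2x_cnic_sp_post() then drains that ring into the
 * hardware slow-path queue (SPQ) as completion credits become available.
 */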
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;
	int cxt_index, cxt_offset;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;
		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
				cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
					ILT_PAGE_CIDS;
				cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
					(cxt_index * ILT_PAGE_CIDS);
				bnx2x_set_ctx_validation(bp,
					&bp->context[cxt_index].
						 vcxt[cxt_offset].eth,
					BNX2X_ISCSI_ETH_CID(bp));
			}
		}

		/* There may be no more than 8 L2 and no more than 8 L5 SPEs
		 * in the air.  We also check that the number of outstanding
		 * COMMON ramrods does not exceed what the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post to SP queue while panic\n");
		return -EIO;
	}
#endif

	if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
	    (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_mutex));
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
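
/* Note (added for clarity): bnx2x_cnic_ctl_send() serializes against CNIC
 * (un)registration with cnic_mutex and may therefore sleep;
 * bnx2x_cnic_ctl_send_bh() is the variant safe to call from BH/softirq
 * context and relies on RCU alone.
 */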
/*
 * For commands that have no data.
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
	struct cnic_ctl_info ctl = {0};

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
	ctl.data.comp.error = err;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
{
	unsigned long accept_flags = 0, ramrod_flags = 0;
	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;

	if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for the UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets - the leading one
		 * in our case).
		 */
		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

		/* Clear STOP_PENDING bit if START is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);

		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
	} else
		/* Clear START_PENDING bit if STOP is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);

	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(sched_state, &bp->sp_state);
	else {
		__set_bit(RAMROD_RX, &ramrod_flags);
		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
				    ramrod_flags);
	}
}
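
/* bnx2x_drv_ctl() is the control entry point exported to the CNIC module
 * via cnic_eth_dev->drv_ctl (see bnx2x_cnic_probe() below); each DRV_CTL_*
 * command is dispatched on behalf of the L5 (iSCSI/FCoE) driver.
 */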
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
		unsigned long sp_bits = 0;

		/* Configure the iSCSI classification object */
		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
				   cp->iscsi_l2_client_id,
				   cp->iscsi_l2_cid, BP_FUNC(bp),
				   bnx2x_sp(bp, mac_rdata),
				   bnx2x_sp_mapping(bp, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
				   &bp->macs_pool);

		/* Set iSCSI MAC address */
		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
		if (rc)
			break;

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring */

		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		unsigned long sp_bits = 0;

		/* Stop accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, false);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
					BNX2X_ISCSI_ETH_MAC, true);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}
	case DRV_CTL_ULP_REGISTER_CMD: {
		int ulp_type = ctl->data.register_data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			int path = BP_PATH(bp);
			int port = BP_PORT(bp);
			int i;
			u32 scratch_offset;
			u32 *host_addr;

			/* first write capability to shmem2 */
			if (ulp_type == CNIC_ULP_ISCSI)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);

			if ((ulp_type != CNIC_ULP_FCOE) ||
			    (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
			    (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
				break;

			/* if we got here, we should write the FCoE
			 * capabilities
			 */
			scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
			if (!scratch_offset)
				break;
			scratch_offset += offsetof(struct glob_ncsi_oem_data,
						   fcoe_features[path][port]);
			host_addr = (u32 *) &(ctl->data.register_data.
					      fcoe_features);
			for (i = 0; i < sizeof(struct fcoe_capabilities);
			     i += 4)
				REG_WR(bp, scratch_offset + i,
				       *(host_addr + i/4));
		}
		break;
	}

	case DRV_CTL_ULP_UNREGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
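
/* Note (added for clarity): when MSI-X is enabled, the second entry of
 * bp->msix_table is handed to CNIC as a dedicated vector; irq_arr[0]
 * describes the CNIC status block and irq_arr[1] the default status block.
 */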
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	int rc;

	DP(NETIF_MSG_IFUP, "Register_cnic called\n");

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	if (!CNIC_SUPPORT(bp)) {
		BNX2X_ERR("Can't register CNIC when not supported\n");
		return -EOPNOTSUPP;
	}

	if (!CNIC_LOADED(bp)) {
		rc = bnx2x_load_cnic(bp);
		if (rc) {
			BNX2X_ERR("CNIC-related load failed\n");
			return rc;
		}
	}

	bp->cnic_enabled = true;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
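
/* Hedged usage sketch (hypothetical caller, not part of this driver): the
 * cnic module is expected to pair these entry points roughly as follows,
 * where "my_cnic_ops" and "my_data" are placeholders owned by the L5 side:
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, &my_cnic_ops, my_data)) {
 *		...	// submit KWQEs via cp->drv_submit_kwqes_16()
 *		cp->drv_unregister_cnic(netdev);
 *	}
 */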
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	bp->cnic_enabled = false;
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL to
	 * indicate to CNIC that it should not try to work with this
	 * device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}

u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u32 offset = BAR_USTRORM_INTMEM;

	if (IS_VF(bp))
		return bnx2x_vf_ustorm_prods_offset(bp, fp);
	else if (!CHIP_IS_E1x(bp))
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return offset;
}

/* Called only on E1H or E2.
 * When pretending to be a PF, the pretend value is the function number 0..7.
 * When pretending to be a VF, the pretend value is the
 * PF-num:VF-valid:ABS-VFID combination.
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}
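
/* Hedged usage sketch (illustrative, not from the original sources): callers
 * typically bracket register accesses made on behalf of another function
 * with a pretend/un-pretend pair; "some_reg" and "val" are placeholders:
 *
 *	if (!bnx2x_pretend_func(bp, pretend_val)) {
 *		REG_WR(bp, some_reg, val);	// runs as the pretended func
 *		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));  // restore identity
 *	}
 */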