1 /* bnx2x_main.c: Broadcom Everest network driver. 2 * 3 * Copyright (c) 2007-2013 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation. 8 * 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com> 10 * Written by: Eliezer Tamir 11 * Based on code from Michael Chan's bnx2 driver 12 * UDP CSUM errata workaround by Arik Gendelman 13 * Slowpath and fastpath rework by Vladislav Zolotarov 14 * Statistics and Link management by Yitchak Gertner 15 * 16 */ 17 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 20 #include <linux/module.h> 21 #include <linux/moduleparam.h> 22 #include <linux/kernel.h> 23 #include <linux/device.h> /* for dev_info() */ 24 #include <linux/timer.h> 25 #include <linux/errno.h> 26 #include <linux/ioport.h> 27 #include <linux/slab.h> 28 #include <linux/interrupt.h> 29 #include <linux/pci.h> 30 #include <linux/aer.h> 31 #include <linux/init.h> 32 #include <linux/netdevice.h> 33 #include <linux/etherdevice.h> 34 #include <linux/skbuff.h> 35 #include <linux/dma-mapping.h> 36 #include <linux/bitops.h> 37 #include <linux/irq.h> 38 #include <linux/delay.h> 39 #include <asm/byteorder.h> 40 #include <linux/time.h> 41 #include <linux/ethtool.h> 42 #include <linux/mii.h> 43 #include <linux/if_vlan.h> 44 #include <net/ip.h> 45 #include <net/ipv6.h> 46 #include <net/tcp.h> 47 #include <net/checksum.h> 48 #include <net/ip6_checksum.h> 49 #include <linux/workqueue.h> 50 #include <linux/crc32.h> 51 #include <linux/crc32c.h> 52 #include <linux/prefetch.h> 53 #include <linux/zlib.h> 54 #include <linux/io.h> 55 #include <linux/semaphore.h> 56 #include <linux/stringify.h> 57 #include <linux/vmalloc.h> 58 59 #include "bnx2x.h" 60 #include "bnx2x_init.h" 61 #include "bnx2x_init_ops.h" 62 #include "bnx2x_cmn.h" 63 #include "bnx2x_vfpf.h" 64 #include "bnx2x_dcb.h" 65 #include "bnx2x_sp.h" 66 67 #include <linux/firmware.h> 68 #include "bnx2x_fw_file_hdr.h" 69 /* FW files */ 70 #define FW_FILE_VERSION \ 71 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ 72 __stringify(BCM_5710_FW_MINOR_VERSION) "." \ 73 __stringify(BCM_5710_FW_REVISION_VERSION) "." 
\ 74 __stringify(BCM_5710_FW_ENGINEERING_VERSION) 75 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" 76 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 77 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" 78 79 /* Time in jiffies before concluding the transmitter is hung */ 80 #define TX_TIMEOUT (5*HZ) 81 82 static char version[] = 83 "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " 84 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 85 86 MODULE_AUTHOR("Eliezer Tamir"); 87 MODULE_DESCRIPTION("Broadcom NetXtreme II " 88 "BCM57710/57711/57711E/" 89 "57712/57712_MF/57800/57800_MF/57810/57810_MF/" 90 "57840/57840_MF Driver"); 91 MODULE_LICENSE("GPL"); 92 MODULE_VERSION(DRV_MODULE_VERSION); 93 MODULE_FIRMWARE(FW_FILE_NAME_E1); 94 MODULE_FIRMWARE(FW_FILE_NAME_E1H); 95 MODULE_FIRMWARE(FW_FILE_NAME_E2); 96 97 int bnx2x_num_queues; 98 module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO); 99 MODULE_PARM_DESC(num_queues, 100 " Set number of queues (default is as a number of CPUs)"); 101 102 static int disable_tpa; 103 module_param(disable_tpa, int, S_IRUGO); 104 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); 105 106 static int int_mode; 107 module_param(int_mode, int, S_IRUGO); 108 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " 109 "(1 INT#x; 2 MSI)"); 110 111 static int dropless_fc; 112 module_param(dropless_fc, int, S_IRUGO); 113 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); 114 115 static int mrrs = -1; 116 module_param(mrrs, int, S_IRUGO); 117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); 118 119 static int debug; 120 module_param(debug, int, S_IRUGO); 121 MODULE_PARM_DESC(debug, " Default debug msglevel"); 122 123 static struct workqueue_struct *bnx2x_wq; 124 struct workqueue_struct *bnx2x_iov_wq; 125 126 struct bnx2x_mac_vals { 127 u32 xmac_addr; 128 u32 xmac_val; 129 u32 emac_addr; 130 u32 emac_val; 131 u32 umac_addr; 132 u32 umac_val; 133 u32 bmac_addr; 134 u32 bmac_val[2]; 135 }; 136 137 enum bnx2x_board_type { 138 BCM57710 = 0, 139 BCM57711, 140 BCM57711E, 141 BCM57712, 142 BCM57712_MF, 143 BCM57712_VF, 144 BCM57800, 145 BCM57800_MF, 146 BCM57800_VF, 147 BCM57810, 148 BCM57810_MF, 149 BCM57810_VF, 150 BCM57840_4_10, 151 BCM57840_2_20, 152 BCM57840_MF, 153 BCM57840_VF, 154 BCM57811, 155 BCM57811_MF, 156 BCM57840_O, 157 BCM57840_MFO, 158 BCM57811_VF 159 }; 160 161 /* indexed by board_type, above */ 162 static struct { 163 char *name; 164 } board_info[] = { 165 [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, 166 [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, 167 [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, 168 [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, 169 [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, 170 [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" }, 171 [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, 172 [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, 173 [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" }, 174 [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, 175 [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, 176 [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 
Gigabit Ethernet Virtual Function" }, 177 [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, 178 [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" }, 179 [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, 180 [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }, 181 [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" }, 182 [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" }, 183 [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, 184 [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, 185 [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" } 186 }; 187 188 #ifndef PCI_DEVICE_ID_NX2_57710 189 #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 190 #endif 191 #ifndef PCI_DEVICE_ID_NX2_57711 192 #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 193 #endif 194 #ifndef PCI_DEVICE_ID_NX2_57711E 195 #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E 196 #endif 197 #ifndef PCI_DEVICE_ID_NX2_57712 198 #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 199 #endif 200 #ifndef PCI_DEVICE_ID_NX2_57712_MF 201 #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF 202 #endif 203 #ifndef PCI_DEVICE_ID_NX2_57712_VF 204 #define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF 205 #endif 206 #ifndef PCI_DEVICE_ID_NX2_57800 207 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 208 #endif 209 #ifndef PCI_DEVICE_ID_NX2_57800_MF 210 #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF 211 #endif 212 #ifndef PCI_DEVICE_ID_NX2_57800_VF 213 #define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF 214 #endif 215 #ifndef PCI_DEVICE_ID_NX2_57810 216 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 217 #endif 218 #ifndef PCI_DEVICE_ID_NX2_57810_MF 219 #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF 220 #endif 221 #ifndef PCI_DEVICE_ID_NX2_57840_O 222 #define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE 223 #endif 224 #ifndef PCI_DEVICE_ID_NX2_57810_VF 225 #define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF 226 #endif 227 #ifndef PCI_DEVICE_ID_NX2_57840_4_10 228 #define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10 229 #endif 230 #ifndef PCI_DEVICE_ID_NX2_57840_2_20 231 #define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20 232 #endif 233 #ifndef PCI_DEVICE_ID_NX2_57840_MFO 234 #define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE 235 #endif 236 #ifndef PCI_DEVICE_ID_NX2_57840_MF 237 #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF 238 #endif 239 #ifndef PCI_DEVICE_ID_NX2_57840_VF 240 #define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF 241 #endif 242 #ifndef PCI_DEVICE_ID_NX2_57811 243 #define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811 244 #endif 245 #ifndef PCI_DEVICE_ID_NX2_57811_MF 246 #define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF 247 #endif 248 #ifndef PCI_DEVICE_ID_NX2_57811_VF 249 #define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF 250 #endif 251 252 static const struct pci_device_id bnx2x_pci_tbl[] = { 253 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 254 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 255 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 256 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, 257 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, 258 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF }, 259 { 
PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, 260 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, 261 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF }, 262 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, 263 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, 264 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O }, 265 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, 266 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 }, 267 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF }, 268 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO }, 269 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, 270 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, 271 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 }, 272 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF }, 273 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF }, 274 { 0 } 275 }; 276 277 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); 278 279 /* Global resources for unloading a previously loaded device */ 280 #define BNX2X_PREV_WAIT_NEEDED 1 281 static DEFINE_SEMAPHORE(bnx2x_prev_sem); 282 static LIST_HEAD(bnx2x_prev_list); 283 284 /* Forward declaration */ 285 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); 286 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); 287 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); 288 289 /**************************************************************************** 290 * General service functions 291 ****************************************************************************/ 292 293 static void __storm_memset_dma_mapping(struct bnx2x *bp, 294 u32 addr, dma_addr_t mapping) 295 { 296 REG_WR(bp, addr, U64_LO(mapping)); 297 REG_WR(bp, addr + 4, U64_HI(mapping)); 298 } 299 300 static void storm_memset_spq_addr(struct bnx2x *bp, 301 dma_addr_t mapping, u16 abs_fid) 302 { 303 u32 addr = XSEM_REG_FAST_MEMORY + 304 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); 305 306 __storm_memset_dma_mapping(bp, addr, mapping); 307 } 308 309 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 310 u16 pf_id) 311 { 312 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 313 pf_id); 314 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 315 pf_id); 316 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 317 pf_id); 318 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 319 pf_id); 320 } 321 322 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 323 u8 enable) 324 { 325 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 326 enable); 327 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 328 enable); 329 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 330 enable); 331 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 332 enable); 333 } 334 335 static void storm_memset_eq_data(struct bnx2x *bp, 336 struct event_ring_data *eq_data, 337 u16 pfid) 338 { 339 size_t size = sizeof(struct event_ring_data); 340 341 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid); 342 343 __storm_memset_struct(bp, addr, size, (u32 *)eq_data); 344 } 345 346 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, 347 u16 pfid) 348 { 349 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); 350 REG_WR16(bp, addr, 
eq_prod); 351 } 352 353 /* used only at init 354 * locking is done by mcp 355 */ 356 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) 357 { 358 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); 359 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); 360 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 361 PCICFG_VENDOR_ID_OFFSET); 362 } 363 364 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) 365 { 366 u32 val; 367 368 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); 369 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); 370 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 371 PCICFG_VENDOR_ID_OFFSET); 372 373 return val; 374 } 375 376 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]" 377 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]" 378 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]" 379 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 380 #define DMAE_DP_DST_NONE "dst_addr [none]" 381 382 static void bnx2x_dp_dmae(struct bnx2x *bp, 383 struct dmae_command *dmae, int msglvl) 384 { 385 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; 386 int i; 387 388 switch (dmae->opcode & DMAE_COMMAND_DST) { 389 case DMAE_CMD_DST_PCI: 390 if (src_type == DMAE_CMD_SRC_PCI) 391 DP(msglvl, "DMAE: opcode 0x%08x\n" 392 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" 393 "comp_addr [%x:%08x], comp_val 0x%08x\n", 394 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 395 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, 396 dmae->comp_addr_hi, dmae->comp_addr_lo, 397 dmae->comp_val); 398 else 399 DP(msglvl, "DMAE: opcode 0x%08x\n" 400 "src [%08x], len [%d*4], dst [%x:%08x]\n" 401 "comp_addr [%x:%08x], comp_val 0x%08x\n", 402 dmae->opcode, dmae->src_addr_lo >> 2, 403 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, 404 dmae->comp_addr_hi, dmae->comp_addr_lo, 405 dmae->comp_val); 406 break; 407 case DMAE_CMD_DST_GRC: 408 if (src_type == DMAE_CMD_SRC_PCI) 409 DP(msglvl, "DMAE: opcode 0x%08x\n" 410 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" 411 "comp_addr [%x:%08x], comp_val 0x%08x\n", 412 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 413 dmae->len, dmae->dst_addr_lo >> 2, 414 dmae->comp_addr_hi, dmae->comp_addr_lo, 415 dmae->comp_val); 416 else 417 DP(msglvl, "DMAE: opcode 0x%08x\n" 418 "src [%08x], len [%d*4], dst [%08x]\n" 419 "comp_addr [%x:%08x], comp_val 0x%08x\n", 420 dmae->opcode, dmae->src_addr_lo >> 2, 421 dmae->len, dmae->dst_addr_lo >> 2, 422 dmae->comp_addr_hi, dmae->comp_addr_lo, 423 dmae->comp_val); 424 break; 425 default: 426 if (src_type == DMAE_CMD_SRC_PCI) 427 DP(msglvl, "DMAE: opcode 0x%08x\n" 428 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" 429 "comp_addr [%x:%08x] comp_val 0x%08x\n", 430 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 431 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, 432 dmae->comp_val); 433 else 434 DP(msglvl, "DMAE: opcode 0x%08x\n" 435 "src_addr [%08x] len [%d * 4] dst_addr [none]\n" 436 "comp_addr [%x:%08x] comp_val 0x%08x\n", 437 dmae->opcode, dmae->src_addr_lo >> 2, 438 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, 439 dmae->comp_val); 440 break; 441 } 442 443 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) 444 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", 445 i, *(((u32 *)dmae) + i)); 446 } 447 448 /* copy command into DMAE command memory and set DMAE command go */ 449 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) 450 { 451 u32 cmd_offset; 452 int i; 453 454 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); 455 for (i = 0; i < 
(sizeof(struct dmae_command)/4); i++) { 456 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); 457 } 458 REG_WR(bp, dmae_reg_go_c[idx], 1); 459 } 460 461 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type) 462 { 463 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | 464 DMAE_CMD_C_ENABLE); 465 } 466 467 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode) 468 { 469 return opcode & ~DMAE_CMD_SRC_RESET; 470 } 471 472 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, 473 bool with_comp, u8 comp_type) 474 { 475 u32 opcode = 0; 476 477 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | 478 (dst_type << DMAE_COMMAND_DST_SHIFT)); 479 480 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); 481 482 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 483 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | 484 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 485 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 486 487 #ifdef __BIG_ENDIAN 488 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; 489 #else 490 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; 491 #endif 492 if (with_comp) 493 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); 494 return opcode; 495 } 496 497 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, 498 struct dmae_command *dmae, 499 u8 src_type, u8 dst_type) 500 { 501 memset(dmae, 0, sizeof(struct dmae_command)); 502 503 /* set the opcode */ 504 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, 505 true, DMAE_COMP_PCI); 506 507 /* fill in the completion parameters */ 508 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); 509 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); 510 dmae->comp_val = DMAE_COMP_VAL; 511 } 512 513 /* issue a dmae command over the init-channel and wait for completion */ 514 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 515 u32 *comp) 516 { 517 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; 518 int rc = 0; 519 520 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); 521 522 /* Lock the dmae channel. Disable BHs to prevent a dead-lock 523 * as long as this code is called both from syscall context and 524 * from ndo_set_rx_mode() flow that may be called from BH. 
525 */ 526 spin_lock_bh(&bp->dmae_lock); 527 528 /* reset completion */ 529 *comp = 0; 530 531 /* post the command on the channel used for initializations */ 532 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 533 534 /* wait for completion */ 535 udelay(5); 536 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 537 538 if (!cnt || 539 (bp->recovery_state != BNX2X_RECOVERY_DONE && 540 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { 541 BNX2X_ERR("DMAE timeout!\n"); 542 rc = DMAE_TIMEOUT; 543 goto unlock; 544 } 545 cnt--; 546 udelay(50); 547 } 548 if (*comp & DMAE_PCI_ERR_FLAG) { 549 BNX2X_ERR("DMAE PCI error!\n"); 550 rc = DMAE_PCI_ERROR; 551 } 552 553 unlock: 554 spin_unlock_bh(&bp->dmae_lock); 555 return rc; 556 } 557 558 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 559 u32 len32) 560 { 561 int rc; 562 struct dmae_command dmae; 563 564 if (!bp->dmae_ready) { 565 u32 *data = bnx2x_sp(bp, wb_data[0]); 566 567 if (CHIP_IS_E1(bp)) 568 bnx2x_init_ind_wr(bp, dst_addr, data, len32); 569 else 570 bnx2x_init_str_wr(bp, dst_addr, data, len32); 571 return; 572 } 573 574 /* set opcode and fixed command fields */ 575 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); 576 577 /* fill in addresses and len */ 578 dmae.src_addr_lo = U64_LO(dma_addr); 579 dmae.src_addr_hi = U64_HI(dma_addr); 580 dmae.dst_addr_lo = dst_addr >> 2; 581 dmae.dst_addr_hi = 0; 582 dmae.len = len32; 583 584 /* issue the command and wait for completion */ 585 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); 586 if (rc) { 587 BNX2X_ERR("DMAE returned failure %d\n", rc); 588 #ifdef BNX2X_STOP_ON_ERROR 589 bnx2x_panic(); 590 #endif 591 } 592 } 593 594 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) 595 { 596 int rc; 597 struct dmae_command dmae; 598 599 if (!bp->dmae_ready) { 600 u32 *data = bnx2x_sp(bp, wb_data[0]); 601 int i; 602 603 if (CHIP_IS_E1(bp)) 604 for (i = 0; i < len32; i++) 605 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); 606 else 607 for (i = 0; i < len32; i++) 608 data[i] = REG_RD(bp, src_addr + i*4); 609 610 return; 611 } 612 613 /* set opcode and fixed command fields */ 614 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); 615 616 /* fill in addresses and len */ 617 dmae.src_addr_lo = src_addr >> 2; 618 dmae.src_addr_hi = 0; 619 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); 620 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 621 dmae.len = len32; 622 623 /* issue the command and wait for completion */ 624 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); 625 if (rc) { 626 BNX2X_ERR("DMAE returned failure %d\n", rc); 627 #ifdef BNX2X_STOP_ON_ERROR 628 bnx2x_panic(); 629 #endif 630 } 631 } 632 633 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 634 u32 addr, u32 len) 635 { 636 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); 637 int offset = 0; 638 639 while (len > dmae_wr_max) { 640 bnx2x_write_dmae(bp, phys_addr + offset, 641 addr + offset, dmae_wr_max); 642 offset += dmae_wr_max * 4; 643 len -= dmae_wr_max; 644 } 645 646 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); 647 } 648 649 static int bnx2x_mc_assert(struct bnx2x *bp) 650 { 651 char last_idx; 652 int i, rc = 0; 653 u32 row0, row1, row2, row3; 654 655 /* XSTORM */ 656 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + 657 XSTORM_ASSERT_LIST_INDEX_OFFSET); 658 if (last_idx) 659 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 660 661 /* print the asserts */ 662 for (i = 0; i < 
STROM_ASSERT_ARRAY_SIZE; i++) { 663 664 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + 665 XSTORM_ASSERT_LIST_OFFSET(i)); 666 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + 667 XSTORM_ASSERT_LIST_OFFSET(i) + 4); 668 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + 669 XSTORM_ASSERT_LIST_OFFSET(i) + 8); 670 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + 671 XSTORM_ASSERT_LIST_OFFSET(i) + 12); 672 673 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 674 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 675 i, row3, row2, row1, row0); 676 rc++; 677 } else { 678 break; 679 } 680 } 681 682 /* TSTORM */ 683 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + 684 TSTORM_ASSERT_LIST_INDEX_OFFSET); 685 if (last_idx) 686 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 687 688 /* print the asserts */ 689 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { 690 691 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + 692 TSTORM_ASSERT_LIST_OFFSET(i)); 693 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + 694 TSTORM_ASSERT_LIST_OFFSET(i) + 4); 695 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + 696 TSTORM_ASSERT_LIST_OFFSET(i) + 8); 697 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + 698 TSTORM_ASSERT_LIST_OFFSET(i) + 12); 699 700 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 701 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 702 i, row3, row2, row1, row0); 703 rc++; 704 } else { 705 break; 706 } 707 } 708 709 /* CSTORM */ 710 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + 711 CSTORM_ASSERT_LIST_INDEX_OFFSET); 712 if (last_idx) 713 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 714 715 /* print the asserts */ 716 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { 717 718 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + 719 CSTORM_ASSERT_LIST_OFFSET(i)); 720 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + 721 CSTORM_ASSERT_LIST_OFFSET(i) + 4); 722 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + 723 CSTORM_ASSERT_LIST_OFFSET(i) + 8); 724 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM + 725 CSTORM_ASSERT_LIST_OFFSET(i) + 12); 726 727 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 728 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 729 i, row3, row2, row1, row0); 730 rc++; 731 } else { 732 break; 733 } 734 } 735 736 /* USTORM */ 737 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + 738 USTORM_ASSERT_LIST_INDEX_OFFSET); 739 if (last_idx) 740 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 741 742 /* print the asserts */ 743 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { 744 745 row0 = REG_RD(bp, BAR_USTRORM_INTMEM + 746 USTORM_ASSERT_LIST_OFFSET(i)); 747 row1 = REG_RD(bp, BAR_USTRORM_INTMEM + 748 USTORM_ASSERT_LIST_OFFSET(i) + 4); 749 row2 = REG_RD(bp, BAR_USTRORM_INTMEM + 750 USTORM_ASSERT_LIST_OFFSET(i) + 8); 751 row3 = REG_RD(bp, BAR_USTRORM_INTMEM + 752 USTORM_ASSERT_LIST_OFFSET(i) + 12); 753 754 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 755 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 756 i, row3, row2, row1, row0); 757 rc++; 758 } else { 759 break; 760 } 761 } 762 763 return rc; 764 } 765 766 #define MCPR_TRACE_BUFFER_SIZE (0x800) 767 #define SCRATCH_BUFFER_SIZE(bp) \ 768 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 
0x20000 : 0x28000)) 769 770 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) 771 { 772 u32 addr, val; 773 u32 mark, offset; 774 __be32 data[9]; 775 int word; 776 u32 trace_shmem_base; 777 if (BP_NOMCP(bp)) { 778 BNX2X_ERR("NO MCP - can not dump\n"); 779 return; 780 } 781 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", 782 (bp->common.bc_ver & 0xff0000) >> 16, 783 (bp->common.bc_ver & 0xff00) >> 8, 784 (bp->common.bc_ver & 0xff)); 785 786 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); 787 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) 788 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); 789 790 if (BP_PATH(bp) == 0) 791 trace_shmem_base = bp->common.shmem_base; 792 else 793 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); 794 795 /* sanity */ 796 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || 797 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + 798 SCRATCH_BUFFER_SIZE(bp)) { 799 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", 800 trace_shmem_base); 801 return; 802 } 803 804 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; 805 806 /* validate TRCB signature */ 807 mark = REG_RD(bp, addr); 808 if (mark != MFW_TRACE_SIGNATURE) { 809 BNX2X_ERR("Trace buffer signature is missing."); 810 return ; 811 } 812 813 /* read cyclic buffer pointer */ 814 addr += 4; 815 mark = REG_RD(bp, addr); 816 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; 817 if (mark >= trace_shmem_base || mark < addr + 4) { 818 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); 819 return; 820 } 821 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); 822 823 printk("%s", lvl); 824 825 /* dump buffer after the mark */ 826 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) { 827 for (word = 0; word < 8; word++) 828 data[word] = htonl(REG_RD(bp, offset + 4*word)); 829 data[8] = 0x0; 830 pr_cont("%s", (char *)data); 831 } 832 833 /* dump buffer before the mark */ 834 for (offset = addr + 4; offset <= mark; offset += 0x8*4) { 835 for (word = 0; word < 8; word++) 836 data[word] = htonl(REG_RD(bp, offset + 4*word)); 837 data[8] = 0x0; 838 pr_cont("%s", (char *)data); 839 } 840 printk("%s" "end of fw dump\n", lvl); 841 } 842 843 static void bnx2x_fw_dump(struct bnx2x *bp) 844 { 845 bnx2x_fw_dump_lvl(bp, KERN_ERR); 846 } 847 848 static void bnx2x_hc_int_disable(struct bnx2x *bp) 849 { 850 int port = BP_PORT(bp); 851 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 852 u32 val = REG_RD(bp, addr); 853 854 /* in E1 we must use only PCI configuration space to disable 855 * MSI/MSIX capability 856 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block 857 */ 858 if (CHIP_IS_E1(bp)) { 859 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on 860 * Use mask register to prevent from HC sending interrupts 861 * after we exit the function 862 */ 863 REG_WR(bp, HC_REG_INT_MASK + port*4, 0); 864 865 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 866 HC_CONFIG_0_REG_INT_LINE_EN_0 | 867 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 868 } else 869 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 870 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 871 HC_CONFIG_0_REG_INT_LINE_EN_0 | 872 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 873 874 DP(NETIF_MSG_IFDOWN, 875 "write %x to HC %d (addr 0x%x)\n", 876 val, port, addr); 877 878 /* flush all outstanding writes */ 879 mmiowb(); 880 881 REG_WR(bp, addr, val); 882 if (REG_RD(bp, addr) != val) 883 BNX2X_ERR("BUG! 
Proper val not read from IGU!\n"); 884 } 885 886 static void bnx2x_igu_int_disable(struct bnx2x *bp) 887 { 888 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 889 890 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 891 IGU_PF_CONF_INT_LINE_EN | 892 IGU_PF_CONF_ATTN_BIT_EN); 893 894 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); 895 896 /* flush all outstanding writes */ 897 mmiowb(); 898 899 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 900 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) 901 BNX2X_ERR("BUG! Proper val not read from IGU!\n"); 902 } 903 904 static void bnx2x_int_disable(struct bnx2x *bp) 905 { 906 if (bp->common.int_block == INT_BLOCK_HC) 907 bnx2x_hc_int_disable(bp); 908 else 909 bnx2x_igu_int_disable(bp); 910 } 911 912 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) 913 { 914 int i; 915 u16 j; 916 struct hc_sp_status_block_data sp_sb_data; 917 int func = BP_FUNC(bp); 918 #ifdef BNX2X_STOP_ON_ERROR 919 u16 start = 0, end = 0; 920 u8 cos; 921 #endif 922 if (IS_PF(bp) && disable_int) 923 bnx2x_int_disable(bp); 924 925 bp->stats_state = STATS_STATE_DISABLED; 926 bp->eth_stats.unrecoverable_error++; 927 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 928 929 BNX2X_ERR("begin crash dump -----------------\n"); 930 931 /* Indices */ 932 /* Common */ 933 if (IS_PF(bp)) { 934 struct host_sp_status_block *def_sb = bp->def_status_blk; 935 int data_size, cstorm_offset; 936 937 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", 938 bp->def_idx, bp->def_att_idx, bp->attn_state, 939 bp->spq_prod_idx, bp->stats_counter); 940 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", 941 def_sb->atten_status_block.attn_bits, 942 def_sb->atten_status_block.attn_bits_ack, 943 def_sb->atten_status_block.status_block_id, 944 def_sb->atten_status_block.attn_bits_index); 945 BNX2X_ERR(" def ("); 946 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) 947 pr_cont("0x%x%s", 948 def_sb->sp_sb.index_values[i], 949 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); 950 951 data_size = sizeof(struct hc_sp_status_block_data) / 952 sizeof(u32); 953 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func); 954 for (i = 0; i < data_size; i++) 955 *((u32 *)&sp_sb_data + i) = 956 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + 957 i * sizeof(u32)); 958 959 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", 960 sp_sb_data.igu_sb_id, 961 sp_sb_data.igu_seg_id, 962 sp_sb_data.p_func.pf_id, 963 sp_sb_data.p_func.vnic_id, 964 sp_sb_data.p_func.vf_id, 965 sp_sb_data.p_func.vf_valid, 966 sp_sb_data.state); 967 } 968 969 for_each_eth_queue(bp, i) { 970 struct bnx2x_fastpath *fp = &bp->fp[i]; 971 int loop; 972 struct hc_status_block_data_e2 sb_data_e2; 973 struct hc_status_block_data_e1x sb_data_e1x; 974 struct hc_status_block_sm *hc_sm_p = 975 CHIP_IS_E1x(bp) ? 976 sb_data_e1x.common.state_machine : 977 sb_data_e2.common.state_machine; 978 struct hc_index_data *hc_index_p = 979 CHIP_IS_E1x(bp) ? 
980 sb_data_e1x.index_data : 981 sb_data_e2.index_data; 982 u8 data_size, cos; 983 u32 *sb_data_p; 984 struct bnx2x_fp_txdata txdata; 985 986 /* Rx */ 987 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", 988 i, fp->rx_bd_prod, fp->rx_bd_cons, 989 fp->rx_comp_prod, 990 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); 991 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n", 992 fp->rx_sge_prod, fp->last_max_sge, 993 le16_to_cpu(fp->fp_hc_idx)); 994 995 /* Tx */ 996 for_each_cos_in_tx_queue(fp, cos) 997 { 998 txdata = *fp->txdata_ptr[cos]; 999 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", 1000 i, txdata.tx_pkt_prod, 1001 txdata.tx_pkt_cons, txdata.tx_bd_prod, 1002 txdata.tx_bd_cons, 1003 le16_to_cpu(*txdata.tx_cons_sb)); 1004 } 1005 1006 loop = CHIP_IS_E1x(bp) ? 1007 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; 1008 1009 /* host sb data */ 1010 1011 if (IS_FCOE_FP(fp)) 1012 continue; 1013 1014 BNX2X_ERR(" run indexes ("); 1015 for (j = 0; j < HC_SB_MAX_SM; j++) 1016 pr_cont("0x%x%s", 1017 fp->sb_running_index[j], 1018 (j == HC_SB_MAX_SM - 1) ? ")" : " "); 1019 1020 BNX2X_ERR(" indexes ("); 1021 for (j = 0; j < loop; j++) 1022 pr_cont("0x%x%s", 1023 fp->sb_index_values[j], 1024 (j == loop - 1) ? ")" : " "); 1025 1026 /* VF cannot access FW refelection for status block */ 1027 if (IS_VF(bp)) 1028 continue; 1029 1030 /* fw sb data */ 1031 data_size = CHIP_IS_E1x(bp) ? 1032 sizeof(struct hc_status_block_data_e1x) : 1033 sizeof(struct hc_status_block_data_e2); 1034 data_size /= sizeof(u32); 1035 sb_data_p = CHIP_IS_E1x(bp) ? 1036 (u32 *)&sb_data_e1x : 1037 (u32 *)&sb_data_e2; 1038 /* copy sb data in here */ 1039 for (j = 0; j < data_size; j++) 1040 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + 1041 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + 1042 j * sizeof(u32)); 1043 1044 if (!CHIP_IS_E1x(bp)) { 1045 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", 1046 sb_data_e2.common.p_func.pf_id, 1047 sb_data_e2.common.p_func.vf_id, 1048 sb_data_e2.common.p_func.vf_valid, 1049 sb_data_e2.common.p_func.vnic_id, 1050 sb_data_e2.common.same_igu_sb_1b, 1051 sb_data_e2.common.state); 1052 } else { 1053 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", 1054 sb_data_e1x.common.p_func.pf_id, 1055 sb_data_e1x.common.p_func.vf_id, 1056 sb_data_e1x.common.p_func.vf_valid, 1057 sb_data_e1x.common.p_func.vnic_id, 1058 sb_data_e1x.common.same_igu_sb_1b, 1059 sb_data_e1x.common.state); 1060 } 1061 1062 /* SB_SMs data */ 1063 for (j = 0; j < HC_SB_MAX_SM; j++) { 1064 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n", 1065 j, hc_sm_p[j].__flags, 1066 hc_sm_p[j].igu_sb_id, 1067 hc_sm_p[j].igu_seg_id, 1068 hc_sm_p[j].time_to_expire, 1069 hc_sm_p[j].timer_value); 1070 } 1071 1072 /* Indices data */ 1073 for (j = 0; j < loop; j++) { 1074 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, 1075 hc_index_p[j].flags, 1076 hc_index_p[j].timeout); 1077 } 1078 } 1079 1080 #ifdef BNX2X_STOP_ON_ERROR 1081 if (IS_PF(bp)) { 1082 /* event queue */ 1083 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); 1084 for (i = 0; i < NUM_EQ_DESC; i++) { 1085 u32 *data = (u32 *)&bp->eq_ring[i].message.data; 1086 1087 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", 1088 i, bp->eq_ring[i].message.opcode, 1089 
bp->eq_ring[i].message.error); 1090 BNX2X_ERR("data: %x %x %x\n", 1091 data[0], data[1], data[2]); 1092 } 1093 } 1094 1095 /* Rings */ 1096 /* Rx */ 1097 for_each_valid_rx_queue(bp, i) { 1098 struct bnx2x_fastpath *fp = &bp->fp[i]; 1099 1100 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 1101 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); 1102 for (j = start; j != end; j = RX_BD(j + 1)) { 1103 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; 1104 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; 1105 1106 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", 1107 i, j, rx_bd[1], rx_bd[0], sw_bd->data); 1108 } 1109 1110 start = RX_SGE(fp->rx_sge_prod); 1111 end = RX_SGE(fp->last_max_sge); 1112 for (j = start; j != end; j = RX_SGE(j + 1)) { 1113 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; 1114 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; 1115 1116 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", 1117 i, j, rx_sge[1], rx_sge[0], sw_page->page); 1118 } 1119 1120 start = RCQ_BD(fp->rx_comp_cons - 10); 1121 end = RCQ_BD(fp->rx_comp_cons + 503); 1122 for (j = start; j != end; j = RCQ_BD(j + 1)) { 1123 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; 1124 1125 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", 1126 i, j, cqe[0], cqe[1], cqe[2], cqe[3]); 1127 } 1128 } 1129 1130 /* Tx */ 1131 for_each_valid_tx_queue(bp, i) { 1132 struct bnx2x_fastpath *fp = &bp->fp[i]; 1133 for_each_cos_in_tx_queue(fp, cos) { 1134 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 1135 1136 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); 1137 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); 1138 for (j = start; j != end; j = TX_BD(j + 1)) { 1139 struct sw_tx_bd *sw_bd = 1140 &txdata->tx_buf_ring[j]; 1141 1142 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n", 1143 i, cos, j, sw_bd->skb, 1144 sw_bd->first_bd); 1145 } 1146 1147 start = TX_BD(txdata->tx_bd_cons - 10); 1148 end = TX_BD(txdata->tx_bd_cons + 254); 1149 for (j = start; j != end; j = TX_BD(j + 1)) { 1150 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; 1151 1152 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n", 1153 i, cos, j, tx_bd[0], tx_bd[1], 1154 tx_bd[2], tx_bd[3]); 1155 } 1156 } 1157 } 1158 #endif 1159 if (IS_PF(bp)) { 1160 bnx2x_fw_dump(bp); 1161 bnx2x_mc_assert(bp); 1162 } 1163 BNX2X_ERR("end crash dump -----------------\n"); 1164 } 1165 1166 /* 1167 * FLR Support for E2 1168 * 1169 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW 1170 * initialization. 
1171 */ 1172 #define FLR_WAIT_USEC 10000 /* 10 milliseconds */ 1173 #define FLR_WAIT_INTERVAL 50 /* usec */ 1174 #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ 1175 1176 struct pbf_pN_buf_regs { 1177 int pN; 1178 u32 init_crd; 1179 u32 crd; 1180 u32 crd_freed; 1181 }; 1182 1183 struct pbf_pN_cmd_regs { 1184 int pN; 1185 u32 lines_occup; 1186 u32 lines_freed; 1187 }; 1188 1189 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, 1190 struct pbf_pN_buf_regs *regs, 1191 u32 poll_count) 1192 { 1193 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; 1194 u32 cur_cnt = poll_count; 1195 1196 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); 1197 crd = crd_start = REG_RD(bp, regs->crd); 1198 init_crd = REG_RD(bp, regs->init_crd); 1199 1200 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 1201 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); 1202 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 1203 1204 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < 1205 (init_crd - crd_start))) { 1206 if (cur_cnt--) { 1207 udelay(FLR_WAIT_INTERVAL); 1208 crd = REG_RD(bp, regs->crd); 1209 crd_freed = REG_RD(bp, regs->crd_freed); 1210 } else { 1211 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", 1212 regs->pN); 1213 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", 1214 regs->pN, crd); 1215 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", 1216 regs->pN, crd_freed); 1217 break; 1218 } 1219 } 1220 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", 1221 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 1222 } 1223 1224 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, 1225 struct pbf_pN_cmd_regs *regs, 1226 u32 poll_count) 1227 { 1228 u32 occup, to_free, freed, freed_start; 1229 u32 cur_cnt = poll_count; 1230 1231 occup = to_free = REG_RD(bp, regs->lines_occup); 1232 freed = freed_start = REG_RD(bp, regs->lines_freed); 1233 1234 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 1235 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 1236 1237 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { 1238 if (cur_cnt--) { 1239 udelay(FLR_WAIT_INTERVAL); 1240 occup = REG_RD(bp, regs->lines_occup); 1241 freed = REG_RD(bp, regs->lines_freed); 1242 } else { 1243 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", 1244 regs->pN); 1245 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", 1246 regs->pN, occup); 1247 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", 1248 regs->pN, freed); 1249 break; 1250 } 1251 } 1252 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", 1253 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 1254 } 1255 1256 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, 1257 u32 expected, u32 poll_count) 1258 { 1259 u32 cur_cnt = poll_count; 1260 u32 val; 1261 1262 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) 1263 udelay(FLR_WAIT_INTERVAL); 1264 1265 return val; 1266 } 1267 1268 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, 1269 char *msg, u32 poll_cnt) 1270 { 1271 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); 1272 if (val != 0) { 1273 BNX2X_ERR("%s usage count=%d\n", msg, val); 1274 return 1; 1275 } 1276 return 0; 1277 } 1278 1279 /* Common routines with VF FLR cleanup */ 1280 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) 1281 { 1282 /* adjust polling timeout */ 1283 if (CHIP_REV_IS_EMUL(bp)) 1284 return FLR_POLL_CNT * 2000; 1285 1286 if (CHIP_REV_IS_FPGA(bp)) 1287 return FLR_POLL_CNT * 120; 1288 1289 return FLR_POLL_CNT; 
1290 } 1291 1292 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) 1293 { 1294 struct pbf_pN_cmd_regs cmd_regs[] = { 1295 {0, (CHIP_IS_E3B0(bp)) ? 1296 PBF_REG_TQ_OCCUPANCY_Q0 : 1297 PBF_REG_P0_TQ_OCCUPANCY, 1298 (CHIP_IS_E3B0(bp)) ? 1299 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 1300 PBF_REG_P0_TQ_LINES_FREED_CNT}, 1301 {1, (CHIP_IS_E3B0(bp)) ? 1302 PBF_REG_TQ_OCCUPANCY_Q1 : 1303 PBF_REG_P1_TQ_OCCUPANCY, 1304 (CHIP_IS_E3B0(bp)) ? 1305 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 1306 PBF_REG_P1_TQ_LINES_FREED_CNT}, 1307 {4, (CHIP_IS_E3B0(bp)) ? 1308 PBF_REG_TQ_OCCUPANCY_LB_Q : 1309 PBF_REG_P4_TQ_OCCUPANCY, 1310 (CHIP_IS_E3B0(bp)) ? 1311 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 1312 PBF_REG_P4_TQ_LINES_FREED_CNT} 1313 }; 1314 1315 struct pbf_pN_buf_regs buf_regs[] = { 1316 {0, (CHIP_IS_E3B0(bp)) ? 1317 PBF_REG_INIT_CRD_Q0 : 1318 PBF_REG_P0_INIT_CRD , 1319 (CHIP_IS_E3B0(bp)) ? 1320 PBF_REG_CREDIT_Q0 : 1321 PBF_REG_P0_CREDIT, 1322 (CHIP_IS_E3B0(bp)) ? 1323 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 1324 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 1325 {1, (CHIP_IS_E3B0(bp)) ? 1326 PBF_REG_INIT_CRD_Q1 : 1327 PBF_REG_P1_INIT_CRD, 1328 (CHIP_IS_E3B0(bp)) ? 1329 PBF_REG_CREDIT_Q1 : 1330 PBF_REG_P1_CREDIT, 1331 (CHIP_IS_E3B0(bp)) ? 1332 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 1333 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 1334 {4, (CHIP_IS_E3B0(bp)) ? 1335 PBF_REG_INIT_CRD_LB_Q : 1336 PBF_REG_P4_INIT_CRD, 1337 (CHIP_IS_E3B0(bp)) ? 1338 PBF_REG_CREDIT_LB_Q : 1339 PBF_REG_P4_CREDIT, 1340 (CHIP_IS_E3B0(bp)) ? 1341 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 1342 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 1343 }; 1344 1345 int i; 1346 1347 /* Verify the command queues are flushed P0, P1, P4 */ 1348 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) 1349 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); 1350 1351 /* Verify the transmission buffers are flushed P0, P1, P4 */ 1352 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) 1353 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); 1354 } 1355 1356 #define OP_GEN_PARAM(param) \ 1357 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 1358 1359 #define OP_GEN_TYPE(type) \ 1360 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 1361 1362 #define OP_GEN_AGG_VECT(index) \ 1363 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 1364 1365 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) 1366 { 1367 u32 op_gen_command = 0; 1368 u32 comp_addr = BAR_CSTRORM_INTMEM + 1369 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); 1370 int ret = 0; 1371 1372 if (REG_RD(bp, comp_addr)) { 1373 BNX2X_ERR("Cleanup complete was not 0 before sending\n"); 1374 return 1; 1375 } 1376 1377 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 1378 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 1379 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 1380 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 1381 1382 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); 1383 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command); 1384 1385 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { 1386 BNX2X_ERR("FW final cleanup did not succeed\n"); 1387 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", 1388 (REG_RD(bp, comp_addr))); 1389 bnx2x_panic(); 1390 return 1; 1391 } 1392 /* Zero completion for next FLR */ 1393 REG_WR(bp, comp_addr, 0); 1394 1395 return ret; 1396 } 1397 1398 u8 bnx2x_is_pcie_pending(struct pci_dev *dev) 1399 { 1400 u16 status; 1401 1402 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 
1403 return status & PCI_EXP_DEVSTA_TRPND; 1404 } 1405 1406 /* PF FLR specific routines 1407 */ 1408 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) 1409 { 1410 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 1411 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1412 CFC_REG_NUM_LCIDS_INSIDE_PF, 1413 "CFC PF usage counter timed out", 1414 poll_cnt)) 1415 return 1; 1416 1417 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 1418 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1419 DORQ_REG_PF_USAGE_CNT, 1420 "DQ PF usage counter timed out", 1421 poll_cnt)) 1422 return 1; 1423 1424 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 1425 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1426 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), 1427 "QM PF usage counter timed out", 1428 poll_cnt)) 1429 return 1; 1430 1431 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 1432 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1433 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), 1434 "Timers VNIC usage counter timed out", 1435 poll_cnt)) 1436 return 1; 1437 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1438 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), 1439 "Timers NUM_SCANS usage counter timed out", 1440 poll_cnt)) 1441 return 1; 1442 1443 /* Wait DMAE PF usage counter to zero */ 1444 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1445 dmae_reg_go_c[INIT_DMAE_C(bp)], 1446 "DMAE command register timed out", 1447 poll_cnt)) 1448 return 1; 1449 1450 return 0; 1451 } 1452 1453 static void bnx2x_hw_enable_status(struct bnx2x *bp) 1454 { 1455 u32 val; 1456 1457 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); 1458 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 1459 1460 val = REG_RD(bp, PBF_REG_DISABLE_PF); 1461 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); 1462 1463 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); 1464 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 1465 1466 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); 1467 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 1468 1469 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 1470 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 1471 1472 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 1473 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 1474 1475 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 1476 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 1477 1478 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 1479 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", 1480 val); 1481 } 1482 1483 static int bnx2x_pf_flr_clnup(struct bnx2x *bp) 1484 { 1485 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); 1486 1487 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); 1488 1489 /* Re-enable PF target read access */ 1490 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 1491 1492 /* Poll HW usage counters */ 1493 DP(BNX2X_MSG_SP, "Polling usage counters\n"); 1494 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) 1495 return -EBUSY; 1496 1497 /* Zero the igu 'trailing edge' and 'leading edge' */ 1498 1499 /* Send the FW cleanup command */ 1500 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) 1501 return -EBUSY; 1502 1503 /* ATC cleanup */ 1504 1505 /* Verify TX hw is flushed */ 1506 bnx2x_tx_hw_flushed(bp, poll_cnt); 1507 1508 /* Wait 100ms (not adjusted according to platform) */ 1509 msleep(100); 1510 1511 /* Verify no pending pci transactions */ 1512 if (bnx2x_is_pcie_pending(bp->pdev)) 1513 BNX2X_ERR("PCIE 
Transactions still pending\n"); 1514 1515 /* Debug */ 1516 bnx2x_hw_enable_status(bp); 1517 1518 /* 1519 * Master enable - Due to WB DMAE writes performed before this 1520 * register is re-initialized as part of the regular function init 1521 */ 1522 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 1523 1524 return 0; 1525 } 1526 1527 static void bnx2x_hc_int_enable(struct bnx2x *bp) 1528 { 1529 int port = BP_PORT(bp); 1530 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1531 u32 val = REG_RD(bp, addr); 1532 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; 1533 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; 1534 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; 1535 1536 if (msix) { 1537 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1538 HC_CONFIG_0_REG_INT_LINE_EN_0); 1539 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1540 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1541 if (single_msix) 1542 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 1543 } else if (msi) { 1544 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 1545 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1546 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1547 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1548 } else { 1549 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1550 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1551 HC_CONFIG_0_REG_INT_LINE_EN_0 | 1552 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1553 1554 if (!CHIP_IS_E1(bp)) { 1555 DP(NETIF_MSG_IFUP, 1556 "write %x to HC %d (addr 0x%x)\n", val, port, addr); 1557 1558 REG_WR(bp, addr, val); 1559 1560 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 1561 } 1562 } 1563 1564 if (CHIP_IS_E1(bp)) 1565 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); 1566 1567 DP(NETIF_MSG_IFUP, 1568 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, 1569 (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); 1570 1571 REG_WR(bp, addr, val); 1572 /* 1573 * Ensure that HC_CONFIG is written before leading/trailing edge config 1574 */ 1575 mmiowb(); 1576 barrier(); 1577 1578 if (!CHIP_IS_E1(bp)) { 1579 /* init leading/trailing edge */ 1580 if (IS_MF(bp)) { 1581 val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1582 if (bp->port.pmf) 1583 /* enable nig and gpio3 attention */ 1584 val |= 0x1100; 1585 } else 1586 val = 0xffff; 1587 1588 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 1589 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 1590 } 1591 1592 /* Make sure that interrupts are indeed enabled from here on */ 1593 mmiowb(); 1594 } 1595 1596 static void bnx2x_igu_int_enable(struct bnx2x *bp) 1597 { 1598 u32 val; 1599 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; 1600 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; 1601 bool msi = (bp->flags & USING_MSI_FLAG) ? 
true : false; 1602 1603 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 1604 1605 if (msix) { 1606 val &= ~(IGU_PF_CONF_INT_LINE_EN | 1607 IGU_PF_CONF_SINGLE_ISR_EN); 1608 val |= (IGU_PF_CONF_MSI_MSIX_EN | 1609 IGU_PF_CONF_ATTN_BIT_EN); 1610 1611 if (single_msix) 1612 val |= IGU_PF_CONF_SINGLE_ISR_EN; 1613 } else if (msi) { 1614 val &= ~IGU_PF_CONF_INT_LINE_EN; 1615 val |= (IGU_PF_CONF_MSI_MSIX_EN | 1616 IGU_PF_CONF_ATTN_BIT_EN | 1617 IGU_PF_CONF_SINGLE_ISR_EN); 1618 } else { 1619 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 1620 val |= (IGU_PF_CONF_INT_LINE_EN | 1621 IGU_PF_CONF_ATTN_BIT_EN | 1622 IGU_PF_CONF_SINGLE_ISR_EN); 1623 } 1624 1625 /* Clean previous status - need to configure igu prior to ack*/ 1626 if ((!msix) || single_msix) { 1627 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1628 bnx2x_ack_int(bp); 1629 } 1630 1631 val |= IGU_PF_CONF_FUNC_EN; 1632 1633 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", 1634 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); 1635 1636 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1637 1638 if (val & IGU_PF_CONF_INT_LINE_EN) 1639 pci_intx(bp->pdev, true); 1640 1641 barrier(); 1642 1643 /* init leading/trailing edge */ 1644 if (IS_MF(bp)) { 1645 val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1646 if (bp->port.pmf) 1647 /* enable nig and gpio3 attention */ 1648 val |= 0x1100; 1649 } else 1650 val = 0xffff; 1651 1652 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 1653 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 1654 1655 /* Make sure that interrupts are indeed enabled from here on */ 1656 mmiowb(); 1657 } 1658 1659 void bnx2x_int_enable(struct bnx2x *bp) 1660 { 1661 if (bp->common.int_block == INT_BLOCK_HC) 1662 bnx2x_hc_int_enable(bp); 1663 else 1664 bnx2x_igu_int_enable(bp); 1665 } 1666 1667 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1668 { 1669 int msix = (bp->flags & USING_MSIX_FLAG) ? 
1 : 0; 1670 int i, offset; 1671 1672 if (disable_hw) 1673 /* prevent the HW from sending interrupts */ 1674 bnx2x_int_disable(bp); 1675 1676 /* make sure all ISRs are done */ 1677 if (msix) { 1678 synchronize_irq(bp->msix_table[0].vector); 1679 offset = 1; 1680 if (CNIC_SUPPORT(bp)) 1681 offset++; 1682 for_each_eth_queue(bp, i) 1683 synchronize_irq(bp->msix_table[offset++].vector); 1684 } else 1685 synchronize_irq(bp->pdev->irq); 1686 1687 /* make sure sp_task is not running */ 1688 cancel_delayed_work(&bp->sp_task); 1689 cancel_delayed_work(&bp->period_task); 1690 flush_workqueue(bnx2x_wq); 1691 } 1692 1693 /* fast path */ 1694 1695 /* 1696 * General service functions 1697 */ 1698 1699 /* Return true if succeeded to acquire the lock */ 1700 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) 1701 { 1702 u32 lock_status; 1703 u32 resource_bit = (1 << resource); 1704 int func = BP_FUNC(bp); 1705 u32 hw_lock_control_reg; 1706 1707 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1708 "Trying to take a lock on resource %d\n", resource); 1709 1710 /* Validating that the resource is within range */ 1711 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1712 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1713 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1714 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1715 return false; 1716 } 1717 1718 if (func <= 5) 1719 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 1720 else 1721 hw_lock_control_reg = 1722 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 1723 1724 /* Try to acquire the lock */ 1725 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 1726 lock_status = REG_RD(bp, hw_lock_control_reg); 1727 if (lock_status & resource_bit) 1728 return true; 1729 1730 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1731 "Failed to get a lock on resource %d\n", resource); 1732 return false; 1733 } 1734 1735 /** 1736 * bnx2x_get_leader_lock_resource - get the recovery leader resource id 1737 * 1738 * @bp: driver handle 1739 * 1740 * Returns the recovery leader resource id according to the engine this function 1741 * belongs to. Currently only only 2 engines is supported. 1742 */ 1743 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) 1744 { 1745 if (BP_PATH(bp)) 1746 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 1747 else 1748 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; 1749 } 1750 1751 /** 1752 * bnx2x_trylock_leader_lock- try to acquire a leader lock. 1753 * 1754 * @bp: driver handle 1755 * 1756 * Tries to acquire a leader lock for current engine. 1757 */ 1758 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) 1759 { 1760 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 1761 } 1762 1763 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); 1764 1765 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */ 1766 static int bnx2x_schedule_sp_task(struct bnx2x *bp) 1767 { 1768 /* Set the interrupt occurred bit for the sp-task to recognize it 1769 * must ack the interrupt and transition according to the IGU 1770 * state machine. 1771 */ 1772 atomic_set(&bp->interrupt_occurred, 1); 1773 1774 /* The sp_task must execute only after this bit 1775 * is set, otherwise we will get out of sync and miss all 1776 * further interrupts. Hence, the barrier. 
1777 */ 1778 smp_wmb(); 1779 1780 /* schedule sp_task to workqueue */ 1781 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 1782 } 1783 1784 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) 1785 { 1786 struct bnx2x *bp = fp->bp; 1787 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1788 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1789 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; 1790 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 1791 1792 DP(BNX2X_MSG_SP, 1793 "fp %d cid %d got ramrod #%d state is %x type is %d\n", 1794 fp->index, cid, command, bp->state, 1795 rr_cqe->ramrod_cqe.ramrod_type); 1796 1797 /* If cid is within VF range, replace the slowpath object with the 1798 * one corresponding to this VF 1799 */ 1800 if (cid >= BNX2X_FIRST_VF_CID && 1801 cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS) 1802 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj); 1803 1804 switch (command) { 1805 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 1806 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); 1807 drv_cmd = BNX2X_Q_CMD_UPDATE; 1808 break; 1809 1810 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 1811 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); 1812 drv_cmd = BNX2X_Q_CMD_SETUP; 1813 break; 1814 1815 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 1816 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); 1817 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; 1818 break; 1819 1820 case (RAMROD_CMD_ID_ETH_HALT): 1821 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); 1822 drv_cmd = BNX2X_Q_CMD_HALT; 1823 break; 1824 1825 case (RAMROD_CMD_ID_ETH_TERMINATE): 1826 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); 1827 drv_cmd = BNX2X_Q_CMD_TERMINATE; 1828 break; 1829 1830 case (RAMROD_CMD_ID_ETH_EMPTY): 1831 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); 1832 drv_cmd = BNX2X_Q_CMD_EMPTY; 1833 break; 1834 1835 case (RAMROD_CMD_ID_ETH_TPA_UPDATE): 1836 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); 1837 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA; 1838 break; 1839 1840 default: 1841 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", 1842 command, fp->index); 1843 return; 1844 } 1845 1846 if ((drv_cmd != BNX2X_Q_CMD_MAX) && 1847 q_obj->complete_cmd(bp, q_obj, drv_cmd)) 1848 /* q_obj->complete_cmd() failure means that this was 1849 * an unexpected completion. 1850 * 1851 * In this case we don't want to increase the bp->spq_left 1852 * because apparently we haven't sent this command the first 1853 * place. 1854 */ 1855 #ifdef BNX2X_STOP_ON_ERROR 1856 bnx2x_panic(); 1857 #else 1858 return; 1859 #endif 1860 1861 smp_mb__before_atomic(); 1862 atomic_inc(&bp->cq_spq_left); 1863 /* push the change in bp->spq_left and towards the memory */ 1864 smp_mb__after_atomic(); 1865 1866 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1867 1868 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 1869 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { 1870 /* if Q update ramrod is completed for last Q in AFEX vif set 1871 * flow, then ACK MCP at the end 1872 * 1873 * mark pending ACK to MCP bit. 1874 * prevent case that both bits are cleared. 
1875 * At the end of load/unload driver checks that 1876 * sp_state is cleared, and this order prevents 1877 * races 1878 */ 1879 smp_mb__before_atomic(); 1880 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); 1881 wmb(); 1882 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 1883 smp_mb__after_atomic(); 1884 1885 /* schedule the sp task as mcp ack is required */ 1886 bnx2x_schedule_sp_task(bp); 1887 } 1888 1889 return; 1890 } 1891 1892 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) 1893 { 1894 struct bnx2x *bp = netdev_priv(dev_instance); 1895 u16 status = bnx2x_ack_int(bp); 1896 u16 mask; 1897 int i; 1898 u8 cos; 1899 1900 /* Return here if interrupt is shared and it's not for us */ 1901 if (unlikely(status == 0)) { 1902 DP(NETIF_MSG_INTR, "not our interrupt!\n"); 1903 return IRQ_NONE; 1904 } 1905 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); 1906 1907 #ifdef BNX2X_STOP_ON_ERROR 1908 if (unlikely(bp->panic)) 1909 return IRQ_HANDLED; 1910 #endif 1911 1912 for_each_eth_queue(bp, i) { 1913 struct bnx2x_fastpath *fp = &bp->fp[i]; 1914 1915 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); 1916 if (status & mask) { 1917 /* Handle Rx or Tx according to SB id */ 1918 for_each_cos_in_tx_queue(fp, cos) 1919 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); 1920 prefetch(&fp->sb_running_index[SM_RX_ID]); 1921 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1922 status &= ~mask; 1923 } 1924 } 1925 1926 if (CNIC_SUPPORT(bp)) { 1927 mask = 0x2; 1928 if (status & (mask | 0x1)) { 1929 struct cnic_ops *c_ops = NULL; 1930 1931 rcu_read_lock(); 1932 c_ops = rcu_dereference(bp->cnic_ops); 1933 if (c_ops && (bp->cnic_eth_dev.drv_state & 1934 CNIC_DRV_STATE_HANDLES_IRQ)) 1935 c_ops->cnic_handler(bp->cnic_data, NULL); 1936 rcu_read_unlock(); 1937 1938 status &= ~mask; 1939 } 1940 } 1941 1942 if (unlikely(status & 0x1)) { 1943 1944 /* schedule sp task to perform default status block work, ack 1945 * attentions and enable interrupts. 1946 */ 1947 bnx2x_schedule_sp_task(bp); 1948 1949 status &= ~0x1; 1950 if (!status) 1951 return IRQ_HANDLED; 1952 } 1953 1954 if (unlikely(status)) 1955 DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", 1956 status); 1957 1958 return IRQ_HANDLED; 1959 } 1960 1961 /* Link */ 1962 1963 /* 1964 * General service functions 1965 */ 1966 1967 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) 1968 { 1969 u32 lock_status; 1970 u32 resource_bit = (1 << resource); 1971 int func = BP_FUNC(bp); 1972 u32 hw_lock_control_reg; 1973 int cnt; 1974 1975 /* Validating that the resource is within range */ 1976 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1977 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1978 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1979 return -EINVAL; 1980 } 1981 1982 if (func <= 5) { 1983 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 1984 } else { 1985 hw_lock_control_reg = 1986 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 1987 } 1988 1989 /* Validating that the resource is not already taken */ 1990 lock_status = REG_RD(bp, hw_lock_control_reg); 1991 if (lock_status & resource_bit) { 1992 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n", 1993 lock_status, resource_bit); 1994 return -EEXIST; 1995 } 1996 1997 /* Try for 5 second every 5ms */ 1998 for (cnt = 0; cnt < 1000; cnt++) { 1999 /* Try to acquire the lock */ 2000 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 2001 lock_status = REG_RD(bp, hw_lock_control_reg); 2002 if (lock_status & resource_bit) 2003 return 0; 2004 2005 usleep_range(5000, 10000); 2006 } 2007 BNX2X_ERR("Timeout\n"); 2008 return -EAGAIN; 2009 } 2010 2011 int bnx2x_release_leader_lock(struct bnx2x *bp) 2012 { 2013 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 2014 } 2015 2016 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) 2017 { 2018 u32 lock_status; 2019 u32 resource_bit = (1 << resource); 2020 int func = BP_FUNC(bp); 2021 u32 hw_lock_control_reg; 2022 2023 /* Validating that the resource is within range */ 2024 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 2025 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 2026 resource, HW_LOCK_MAX_RESOURCE_VALUE); 2027 return -EINVAL; 2028 } 2029 2030 if (func <= 5) { 2031 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 2032 } else { 2033 hw_lock_control_reg = 2034 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 2035 } 2036 2037 /* Validating that the resource is currently taken */ 2038 lock_status = REG_RD(bp, hw_lock_control_reg); 2039 if (!(lock_status & resource_bit)) { 2040 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n", 2041 lock_status, resource_bit); 2042 return -EFAULT; 2043 } 2044 2045 REG_WR(bp, hw_lock_control_reg, resource_bit); 2046 return 0; 2047 } 2048 2049 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 2050 { 2051 /* The GPIO should be swapped if swap register is set and active */ 2052 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2053 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2054 int gpio_shift = gpio_num + 2055 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2056 u32 gpio_mask = (1 << gpio_shift); 2057 u32 gpio_reg; 2058 int value; 2059 2060 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2061 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2062 return -EINVAL; 2063 } 2064 2065 /* read GPIO value */ 2066 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2067 2068 /* get the requested pin value */ 2069 if ((gpio_reg & gpio_mask) == gpio_mask) 2070 value = 1; 2071 else 2072 value = 0; 2073 2074 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); 2075 2076 return value; 2077 } 2078 2079 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2080 { 2081 /* The GPIO should be swapped if swap register is set and active */ 2082 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2083 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2084 int gpio_shift = gpio_num + 2085 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2086 u32 gpio_mask = (1 << gpio_shift); 2087 u32 gpio_reg; 2088 2089 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2090 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2091 return -EINVAL; 2092 } 2093 2094 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2095 /* read GPIO and mask except the float bits */ 2096 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2097 2098 switch (mode) { 2099 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2100 DP(NETIF_MSG_LINK, 2101 "Set GPIO %d (shift %d) -> output low\n", 2102 gpio_num, gpio_shift); 2103 /* clear FLOAT and set CLR */ 2104 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2105 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2106 break; 2107 2108 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2109 DP(NETIF_MSG_LINK, 2110 "Set GPIO %d (shift %d) -> output high\n", 2111 gpio_num, gpio_shift); 2112 /* clear FLOAT and set SET */ 2113 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2114 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2115 break; 2116 2117 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2118 DP(NETIF_MSG_LINK, 2119 "Set GPIO %d (shift %d) -> input\n", 2120 gpio_num, gpio_shift); 2121 /* set FLOAT */ 2122 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2123 break; 2124 2125 default: 2126 break; 2127 } 2128 2129 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2130 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2131 2132 return 0; 2133 } 2134 2135 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) 2136 { 2137 u32 gpio_reg = 0; 2138 int rc = 0; 2139 2140 /* Any port swapping should be handled by caller. 
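	 * Unlike bnx2x_set_gpio() above, no NIG_REG_PORT_SWAP /
	 * NIG_REG_STRAP_OVERRIDE lookup is done here and the pin mask is
	 * used exactly as given, so the caller must already have applied
	 * MISC_REGISTERS_GPIO_PORT_SHIFT where it is needed.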
*/ 2141 2142 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2143 /* read GPIO and mask except the float bits */ 2144 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2145 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2146 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2147 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2148 2149 switch (mode) { 2150 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2151 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); 2152 /* set CLR */ 2153 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2154 break; 2155 2156 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2157 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); 2158 /* set SET */ 2159 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2160 break; 2161 2162 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2163 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); 2164 /* set FLOAT */ 2165 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2166 break; 2167 2168 default: 2169 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); 2170 rc = -EINVAL; 2171 break; 2172 } 2173 2174 if (rc == 0) 2175 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2176 2177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2178 2179 return rc; 2180 } 2181 2182 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2183 { 2184 /* The GPIO should be swapped if swap register is set and active */ 2185 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2186 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2187 int gpio_shift = gpio_num + 2188 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2189 u32 gpio_mask = (1 << gpio_shift); 2190 u32 gpio_reg; 2191 2192 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2193 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2194 return -EINVAL; 2195 } 2196 2197 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2198 /* read GPIO int */ 2199 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); 2200 2201 switch (mode) { 2202 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2203 DP(NETIF_MSG_LINK, 2204 "Clear GPIO INT %d (shift %d) -> output low\n", 2205 gpio_num, gpio_shift); 2206 /* clear SET and set CLR */ 2207 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2208 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2209 break; 2210 2211 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2212 DP(NETIF_MSG_LINK, 2213 "Set GPIO INT %d (shift %d) -> output high\n", 2214 gpio_num, gpio_shift); 2215 /* clear CLR and set SET */ 2216 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2217 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2218 break; 2219 2220 default: 2221 break; 2222 } 2223 2224 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); 2225 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2226 2227 return 0; 2228 } 2229 2230 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) 2231 { 2232 u32 spio_reg; 2233 2234 /* Only 2 SPIOs are configurable */ 2235 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 2236 BNX2X_ERR("Invalid SPIO 0x%x\n", spio); 2237 return -EINVAL; 2238 } 2239 2240 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2241 /* read SPIO and mask except the float bits */ 2242 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 2243 2244 switch (mode) { 2245 case MISC_SPIO_OUTPUT_LOW: 2246 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); 2247 /* clear FLOAT and set CLR */ 2248 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2249 spio_reg |= (spio << MISC_SPIO_CLR_POS); 2250 break; 2251 2252 case MISC_SPIO_OUTPUT_HIGH: 2253 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output 
high\n", spio); 2254 /* clear FLOAT and set SET */ 2255 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2256 spio_reg |= (spio << MISC_SPIO_SET_POS); 2257 break; 2258 2259 case MISC_SPIO_INPUT_HI_Z: 2260 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); 2261 /* set FLOAT */ 2262 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 2263 break; 2264 2265 default: 2266 break; 2267 } 2268 2269 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2270 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2271 2272 return 0; 2273 } 2274 2275 void bnx2x_calc_fc_adv(struct bnx2x *bp) 2276 { 2277 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2278 switch (bp->link_vars.ieee_fc & 2279 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2280 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 2281 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2282 ADVERTISED_Pause); 2283 break; 2284 2285 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2286 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2287 ADVERTISED_Pause); 2288 break; 2289 2290 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2291 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2292 break; 2293 2294 default: 2295 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2296 ADVERTISED_Pause); 2297 break; 2298 } 2299 } 2300 2301 static void bnx2x_set_requested_fc(struct bnx2x *bp) 2302 { 2303 /* Initialize link parameters structure variables 2304 * It is recommended to turn off RX FC for jumbo frames 2305 * for better performance 2306 */ 2307 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2308 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2309 else 2310 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2311 } 2312 2313 static void bnx2x_init_dropless_fc(struct bnx2x *bp) 2314 { 2315 u32 pause_enabled = 0; 2316 2317 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { 2318 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 2319 pause_enabled = 1; 2320 2321 REG_WR(bp, BAR_USTRORM_INTMEM + 2322 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), 2323 pause_enabled); 2324 } 2325 2326 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", 2327 pause_enabled ? 
"enabled" : "disabled"); 2328 } 2329 2330 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2331 { 2332 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); 2333 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2334 2335 if (!BP_NOMCP(bp)) { 2336 bnx2x_set_requested_fc(bp); 2337 bnx2x_acquire_phy_lock(bp); 2338 2339 if (load_mode == LOAD_DIAG) { 2340 struct link_params *lp = &bp->link_params; 2341 lp->loopback_mode = LOOPBACK_XGXS; 2342 /* do PHY loopback at 10G speed, if possible */ 2343 if (lp->req_line_speed[cfx_idx] < SPEED_10000) { 2344 if (lp->speed_cap_mask[cfx_idx] & 2345 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2346 lp->req_line_speed[cfx_idx] = 2347 SPEED_10000; 2348 else 2349 lp->req_line_speed[cfx_idx] = 2350 SPEED_1000; 2351 } 2352 } 2353 2354 if (load_mode == LOAD_LOOPBACK_EXT) { 2355 struct link_params *lp = &bp->link_params; 2356 lp->loopback_mode = LOOPBACK_EXT; 2357 } 2358 2359 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2360 2361 bnx2x_release_phy_lock(bp); 2362 2363 bnx2x_init_dropless_fc(bp); 2364 2365 bnx2x_calc_fc_adv(bp); 2366 2367 if (bp->link_vars.link_up) { 2368 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2369 bnx2x_link_report(bp); 2370 } 2371 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2372 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2373 return rc; 2374 } 2375 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2376 return -EINVAL; 2377 } 2378 2379 void bnx2x_link_set(struct bnx2x *bp) 2380 { 2381 if (!BP_NOMCP(bp)) { 2382 bnx2x_acquire_phy_lock(bp); 2383 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2384 bnx2x_release_phy_lock(bp); 2385 2386 bnx2x_init_dropless_fc(bp); 2387 2388 bnx2x_calc_fc_adv(bp); 2389 } else 2390 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2391 } 2392 2393 static void bnx2x__link_reset(struct bnx2x *bp) 2394 { 2395 if (!BP_NOMCP(bp)) { 2396 bnx2x_acquire_phy_lock(bp); 2397 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); 2398 bnx2x_release_phy_lock(bp); 2399 } else 2400 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 2401 } 2402 2403 void bnx2x_force_link_reset(struct bnx2x *bp) 2404 { 2405 bnx2x_acquire_phy_lock(bp); 2406 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2407 bnx2x_release_phy_lock(bp); 2408 } 2409 2410 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) 2411 { 2412 u8 rc = 0; 2413 2414 if (!BP_NOMCP(bp)) { 2415 bnx2x_acquire_phy_lock(bp); 2416 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, 2417 is_serdes); 2418 bnx2x_release_phy_lock(bp); 2419 } else 2420 BNX2X_ERR("Bootcode is missing - can not test link\n"); 2421 2422 return rc; 2423 } 2424 2425 /* Calculates the sum of vn_min_rates. 2426 It's needed for further normalizing of the min_rates. 2427 Returns: 2428 sum of vn_min_rates. 2429 or 2430 0 - if all the min_rates are 0. 2431 In the later case fairness algorithm should be deactivated. 2432 If not all min_rates are zero then those that are zeroes will be set to 1. 
2433 */ 2434 static void bnx2x_calc_vn_min(struct bnx2x *bp, 2435 struct cmng_init_input *input) 2436 { 2437 int all_zero = 1; 2438 int vn; 2439 2440 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2441 u32 vn_cfg = bp->mf_config[vn]; 2442 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2443 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2444 2445 /* Skip hidden vns */ 2446 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2447 vn_min_rate = 0; 2448 /* If min rate is zero - set it to 1 */ 2449 else if (!vn_min_rate) 2450 vn_min_rate = DEF_MIN_RATE; 2451 else 2452 all_zero = 0; 2453 2454 input->vnic_min_rate[vn] = vn_min_rate; 2455 } 2456 2457 /* if ETS or all min rates are zeros - disable fairness */ 2458 if (BNX2X_IS_ETS_ENABLED(bp)) { 2459 input->flags.cmng_enables &= 2460 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2461 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); 2462 } else if (all_zero) { 2463 input->flags.cmng_enables &= 2464 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2465 DP(NETIF_MSG_IFUP, 2466 "All MIN values are zeroes fairness will be disabled\n"); 2467 } else 2468 input->flags.cmng_enables |= 2469 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2470 } 2471 2472 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, 2473 struct cmng_init_input *input) 2474 { 2475 u16 vn_max_rate; 2476 u32 vn_cfg = bp->mf_config[vn]; 2477 2478 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2479 vn_max_rate = 0; 2480 else { 2481 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 2482 2483 if (IS_MF_SI(bp)) { 2484 /* maxCfg in percents of linkspeed */ 2485 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 2486 } else /* SD modes */ 2487 /* maxCfg is absolute in 100Mb units */ 2488 vn_max_rate = maxCfg * 100; 2489 } 2490 2491 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 2492 2493 input->vnic_max_rate[vn] = vn_max_rate; 2494 } 2495 2496 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 2497 { 2498 if (CHIP_REV_IS_SLOW(bp)) 2499 return CMNG_FNS_NONE; 2500 if (IS_MF(bp)) 2501 return CMNG_FNS_MINMAX; 2502 2503 return CMNG_FNS_NONE; 2504 } 2505 2506 void bnx2x_read_mf_cfg(struct bnx2x *bp) 2507 { 2508 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1); 2509 2510 if (BP_NOMCP(bp)) 2511 return; /* what should be the default value in this case */ 2512 2513 /* For 2 port configuration the absolute function number formula 2514 * is: 2515 * abs_func = 2 * vn + BP_PORT + BP_PATH 2516 * 2517 * and there are 4 functions per port 2518 * 2519 * For 4 port configuration it is 2520 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH 2521 * 2522 * and there are 2 functions per port 2523 */ 2524 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2525 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2526 2527 if (func >= E1H_FUNC_MAX) 2528 break; 2529 2530 bp->mf_config[vn] = 2531 MF_CFG_RD(bp, func_mf_config[func].config); 2532 } 2533 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 2534 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 2535 bp->flags |= MF_FUNC_DIS; 2536 } else { 2537 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); 2538 bp->flags &= ~MF_FUNC_DIS; 2539 } 2540 } 2541 2542 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) 2543 { 2544 struct cmng_init_input input; 2545 memset(&input, 0, sizeof(struct cmng_init_input)); 2546 2547 input.port_rate = bp->link_vars.line_speed; 2548 2549 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { 2550 int vn; 2551 2552 /* read mf conf from shmem */ 2553 if (read_cfg) 2554 bnx2x_read_mf_cfg(bp); 2555 2556 /* vn_weight_sum and enable fairness if not 0 */ 2557 bnx2x_calc_vn_min(bp, &input); 2558 2559 /* calculate and set min-max rate for each vn */ 2560 if (bp->port.pmf) 2561 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2562 bnx2x_calc_vn_max(bp, vn, &input); 2563 2564 /* always enable rate shaping and fairness */ 2565 input.flags.cmng_enables |= 2566 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 2567 2568 bnx2x_init_cmng(&input, &bp->cmng); 2569 return; 2570 } 2571 2572 /* rate shaping and fairness are disabled */ 2573 DP(NETIF_MSG_IFUP, 2574 "rate shaping and fairness are disabled\n"); 2575 } 2576 2577 static void storm_memset_cmng(struct bnx2x *bp, 2578 struct cmng_init *cmng, 2579 u8 port) 2580 { 2581 int vn; 2582 size_t size = sizeof(struct cmng_struct_per_port); 2583 2584 u32 addr = BAR_XSTRORM_INTMEM + 2585 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 2586 2587 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); 2588 2589 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2590 int func = func_by_vn(bp, vn); 2591 2592 addr = BAR_XSTRORM_INTMEM + 2593 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func); 2594 size = sizeof(struct rate_shaping_vars_per_vn); 2595 __storm_memset_struct(bp, addr, size, 2596 (u32 *)&cmng->vnic.vnic_max_rate[vn]); 2597 2598 addr = BAR_XSTRORM_INTMEM + 2599 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func); 2600 size = sizeof(struct fairness_vars_per_vn); 2601 __storm_memset_struct(bp, addr, size, 2602 (u32 *)&cmng->vnic.vnic_min_rate[vn]); 2603 } 2604 } 2605 2606 /* init cmng mode in HW according to local configuration */ 2607 void bnx2x_set_local_cmng(struct bnx2x *bp) 2608 { 2609 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2610 2611 if (cmng_fns != CMNG_FNS_NONE) { 2612 bnx2x_cmng_fns_init(bp, false, cmng_fns); 2613 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 2614 } else { 2615 /* rate shaping and fairness are disabled */ 2616 DP(NETIF_MSG_IFUP, 2617 "single function mode without fairness\n"); 2618 } 2619 } 2620 2621 /* This function is called upon link interrupt */ 2622 static void bnx2x_link_attn(struct bnx2x *bp) 2623 { 2624 /* Make sure that we are synced with the current statistics */ 2625 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2626 
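	/* Refresh bp->link_vars from the link (PHY/MAC) layer; everything
	 * below - dropless FC, per-VN rate shaping and the link report -
	 * is derived from the values read here.
	 */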
2627 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2628 2629 bnx2x_init_dropless_fc(bp); 2630 2631 if (bp->link_vars.link_up) { 2632 2633 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { 2634 struct host_port_stats *pstats; 2635 2636 pstats = bnx2x_sp(bp, port_stats); 2637 /* reset old mac stats */ 2638 memset(&(pstats->mac_stx[0]), 0, 2639 sizeof(struct mac_stx)); 2640 } 2641 if (bp->state == BNX2X_STATE_OPEN) 2642 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2643 } 2644 2645 if (bp->link_vars.link_up && bp->link_vars.line_speed) 2646 bnx2x_set_local_cmng(bp); 2647 2648 __bnx2x_link_report(bp); 2649 2650 if (IS_MF(bp)) 2651 bnx2x_link_sync_notify(bp); 2652 } 2653 2654 void bnx2x__link_status_update(struct bnx2x *bp) 2655 { 2656 if (bp->state != BNX2X_STATE_OPEN) 2657 return; 2658 2659 /* read updated dcb configuration */ 2660 if (IS_PF(bp)) { 2661 bnx2x_dcbx_pmf_update(bp); 2662 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2663 if (bp->link_vars.link_up) 2664 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2665 else 2666 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2667 /* indicate link status */ 2668 bnx2x_link_report(bp); 2669 2670 } else { /* VF */ 2671 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | 2672 SUPPORTED_10baseT_Full | 2673 SUPPORTED_100baseT_Half | 2674 SUPPORTED_100baseT_Full | 2675 SUPPORTED_1000baseT_Full | 2676 SUPPORTED_2500baseX_Full | 2677 SUPPORTED_10000baseT_Full | 2678 SUPPORTED_TP | 2679 SUPPORTED_FIBRE | 2680 SUPPORTED_Autoneg | 2681 SUPPORTED_Pause | 2682 SUPPORTED_Asym_Pause); 2683 bp->port.advertising[0] = bp->port.supported[0]; 2684 2685 bp->link_params.bp = bp; 2686 bp->link_params.port = BP_PORT(bp); 2687 bp->link_params.req_duplex[0] = DUPLEX_FULL; 2688 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; 2689 bp->link_params.req_line_speed[0] = SPEED_10000; 2690 bp->link_params.speed_cap_mask[0] = 0x7f0000; 2691 bp->link_params.switch_cfg = SWITCH_CFG_10G; 2692 bp->link_vars.mac_type = MAC_TYPE_BMAC; 2693 bp->link_vars.line_speed = SPEED_10000; 2694 bp->link_vars.link_status = 2695 (LINK_STATUS_LINK_UP | 2696 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 2697 bp->link_vars.link_up = 1; 2698 bp->link_vars.duplex = DUPLEX_FULL; 2699 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; 2700 __bnx2x_link_report(bp); 2701 2702 bnx2x_sample_bulletin(bp); 2703 2704 /* if bulletin board did not have an update for link status 2705 * __bnx2x_link_report will report current status 2706 * but it will NOT duplicate report in case of already reported 2707 * during sampling bulletin board. 
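	 * (the duplicate suppression itself is presumably done inside
	 * __bnx2x_link_report(), which only publishes a state that differs
	 * from the one it last reported).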
2708 */ 2709 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2710 } 2711 } 2712 2713 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, 2714 u16 vlan_val, u8 allowed_prio) 2715 { 2716 struct bnx2x_func_state_params func_params = {NULL}; 2717 struct bnx2x_func_afex_update_params *f_update_params = 2718 &func_params.params.afex_update; 2719 2720 func_params.f_obj = &bp->func_obj; 2721 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE; 2722 2723 /* no need to wait for RAMROD completion, so don't 2724 * set RAMROD_COMP_WAIT flag 2725 */ 2726 2727 f_update_params->vif_id = vifid; 2728 f_update_params->afex_default_vlan = vlan_val; 2729 f_update_params->allowed_priorities = allowed_prio; 2730 2731 /* if ramrod can not be sent, response to MCP immediately */ 2732 if (bnx2x_func_state_change(bp, &func_params) < 0) 2733 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 2734 2735 return 0; 2736 } 2737 2738 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, 2739 u16 vif_index, u8 func_bit_map) 2740 { 2741 struct bnx2x_func_state_params func_params = {NULL}; 2742 struct bnx2x_func_afex_viflists_params *update_params = 2743 &func_params.params.afex_viflists; 2744 int rc; 2745 u32 drv_msg_code; 2746 2747 /* validate only LIST_SET and LIST_GET are received from switch */ 2748 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET)) 2749 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n", 2750 cmd_type); 2751 2752 func_params.f_obj = &bp->func_obj; 2753 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS; 2754 2755 /* set parameters according to cmd_type */ 2756 update_params->afex_vif_list_command = cmd_type; 2757 update_params->vif_list_index = vif_index; 2758 update_params->func_bit_map = 2759 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map; 2760 update_params->func_to_clear = 0; 2761 drv_msg_code = 2762 (cmd_type == VIF_LIST_RULE_GET) ? 
2763 DRV_MSG_CODE_AFEX_LISTGET_ACK : 2764 DRV_MSG_CODE_AFEX_LISTSET_ACK; 2765 2766 /* if ramrod can not be sent, respond to MCP immediately for 2767 * SET and GET requests (other are not triggered from MCP) 2768 */ 2769 rc = bnx2x_func_state_change(bp, &func_params); 2770 if (rc < 0) 2771 bnx2x_fw_command(bp, drv_msg_code, 0); 2772 2773 return 0; 2774 } 2775 2776 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) 2777 { 2778 struct afex_stats afex_stats; 2779 u32 func = BP_ABS_FUNC(bp); 2780 u32 mf_config; 2781 u16 vlan_val; 2782 u32 vlan_prio; 2783 u16 vif_id; 2784 u8 allowed_prio; 2785 u8 vlan_mode; 2786 u32 addr_to_write, vifid, addrs, stats_type, i; 2787 2788 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { 2789 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2790 DP(BNX2X_MSG_MCP, 2791 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); 2792 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); 2793 } 2794 2795 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { 2796 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2797 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); 2798 DP(BNX2X_MSG_MCP, 2799 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", 2800 vifid, addrs); 2801 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, 2802 addrs); 2803 } 2804 2805 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { 2806 addr_to_write = SHMEM2_RD(bp, 2807 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); 2808 stats_type = SHMEM2_RD(bp, 2809 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2810 2811 DP(BNX2X_MSG_MCP, 2812 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", 2813 addr_to_write); 2814 2815 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); 2816 2817 /* write response to scratchpad, for MCP */ 2818 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) 2819 REG_WR(bp, addr_to_write + i*sizeof(u32), 2820 *(((u32 *)(&afex_stats))+i)); 2821 2822 /* send ack message to MCP */ 2823 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); 2824 } 2825 2826 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { 2827 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); 2828 bp->mf_config[BP_VN(bp)] = mf_config; 2829 DP(BNX2X_MSG_MCP, 2830 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", 2831 mf_config); 2832 2833 /* if VIF_SET is "enabled" */ 2834 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { 2835 /* set rate limit directly to internal RAM */ 2836 struct cmng_init_input cmng_input; 2837 struct rate_shaping_vars_per_vn m_rs_vn; 2838 size_t size = sizeof(struct rate_shaping_vars_per_vn); 2839 u32 addr = BAR_XSTRORM_INTMEM + 2840 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); 2841 2842 bp->mf_config[BP_VN(bp)] = mf_config; 2843 2844 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); 2845 m_rs_vn.vn_counter.rate = 2846 cmng_input.vnic_max_rate[BP_VN(bp)]; 2847 m_rs_vn.vn_counter.quota = 2848 (m_rs_vn.vn_counter.rate * 2849 RS_PERIODIC_TIMEOUT_USEC) / 8; 2850 2851 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); 2852 2853 /* read relevant values from mf_cfg struct in shmem */ 2854 vif_id = 2855 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2856 FUNC_MF_CFG_E1HOV_TAG_MASK) >> 2857 FUNC_MF_CFG_E1HOV_TAG_SHIFT; 2858 vlan_val = 2859 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2860 FUNC_MF_CFG_AFEX_VLAN_MASK) >> 2861 FUNC_MF_CFG_AFEX_VLAN_SHIFT; 2862 vlan_prio = (mf_config & 2863 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 2864 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; 2865 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); 2866 
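			/* afex_config packs both the VLAN mode and the allowed
			 * priority (COS filter) bit-map; extract each with its
			 * own mask/shift pair below.
			 */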
vlan_mode = 2867 (MF_CFG_RD(bp, 2868 func_mf_config[func].afex_config) & 2869 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 2870 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; 2871 allowed_prio = 2872 (MF_CFG_RD(bp, 2873 func_mf_config[func].afex_config) & 2874 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 2875 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT; 2876 2877 /* send ramrod to FW, return in case of failure */ 2878 if (bnx2x_afex_func_update(bp, vif_id, vlan_val, 2879 allowed_prio)) 2880 return; 2881 2882 bp->afex_def_vlan_tag = vlan_val; 2883 bp->afex_vlan_mode = vlan_mode; 2884 } else { 2885 /* notify link down because BP->flags is disabled */ 2886 bnx2x_link_report(bp); 2887 2888 /* send INVALID VIF ramrod to FW */ 2889 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); 2890 2891 /* Reset the default afex VLAN */ 2892 bp->afex_def_vlan_tag = -1; 2893 } 2894 } 2895 } 2896 2897 static void bnx2x_pmf_update(struct bnx2x *bp) 2898 { 2899 int port = BP_PORT(bp); 2900 u32 val; 2901 2902 bp->port.pmf = 1; 2903 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); 2904 2905 /* 2906 * We need the mb() to ensure the ordering between the writing to 2907 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). 2908 */ 2909 smp_mb(); 2910 2911 /* queue a periodic task */ 2912 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2913 2914 bnx2x_dcbx_pmf_update(bp); 2915 2916 /* enable nig attention */ 2917 val = (0xff0f | (1 << (BP_VN(bp) + 4))); 2918 if (bp->common.int_block == INT_BLOCK_HC) { 2919 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2920 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2921 } else if (!CHIP_IS_E1x(bp)) { 2922 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 2923 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 2924 } 2925 2926 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 2927 } 2928 2929 /* end of Link */ 2930 2931 /* slow path */ 2932 2933 /* 2934 * General service functions 2935 */ 2936 2937 /* send the MCP a request, block until there is a reply */ 2938 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 2939 { 2940 int mb_idx = BP_FW_MB_IDX(bp); 2941 u32 seq; 2942 u32 rc = 0; 2943 u32 cnt = 1; 2944 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2945 2946 mutex_lock(&bp->fw_mb_mutex); 2947 seq = ++bp->fw_seq; 2948 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); 2949 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); 2950 2951 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", 2952 (command | seq), param); 2953 2954 do { 2955 /* let the FW do it's magic ... */ 2956 msleep(delay); 2957 2958 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); 2959 2960 /* Give the FW up to 5 second (500*10ms) */ 2961 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2962 2963 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", 2964 cnt*delay, rc, seq); 2965 2966 /* is this a reply to our command? */ 2967 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) 2968 rc &= FW_MSG_CODE_MASK; 2969 else { 2970 /* FW BUG! 
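	 * The sequence number in fw_mb_header never matched the one we
	 * wrote, even after the ~5 second polling loop above; dump the
	 * firmware state and hand 0 back to the caller.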
*/ 2971 BNX2X_ERR("FW failed to respond!\n"); 2972 bnx2x_fw_dump(bp); 2973 rc = 0; 2974 } 2975 mutex_unlock(&bp->fw_mb_mutex); 2976 2977 return rc; 2978 } 2979 2980 static void storm_memset_func_cfg(struct bnx2x *bp, 2981 struct tstorm_eth_function_common_config *tcfg, 2982 u16 abs_fid) 2983 { 2984 size_t size = sizeof(struct tstorm_eth_function_common_config); 2985 2986 u32 addr = BAR_TSTRORM_INTMEM + 2987 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); 2988 2989 __storm_memset_struct(bp, addr, size, (u32 *)tcfg); 2990 } 2991 2992 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) 2993 { 2994 if (CHIP_IS_E1x(bp)) { 2995 struct tstorm_eth_function_common_config tcfg = {0}; 2996 2997 storm_memset_func_cfg(bp, &tcfg, p->func_id); 2998 } 2999 3000 /* Enable the function in the FW */ 3001 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); 3002 storm_memset_func_en(bp, p->func_id, 1); 3003 3004 /* spq */ 3005 if (p->func_flgs & FUNC_FLG_SPQ) { 3006 storm_memset_spq_addr(bp, p->spq_map, p->func_id); 3007 REG_WR(bp, XSEM_REG_FAST_MEMORY + 3008 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); 3009 } 3010 } 3011 3012 /** 3013 * bnx2x_get_common_flags - Return common flags 3014 * 3015 * @bp device handle 3016 * @fp queue handle 3017 * @zero_stats TRUE if statistics zeroing is needed 3018 * 3019 * Return the flags that are common for the Tx-only and not normal connections. 3020 */ 3021 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, 3022 struct bnx2x_fastpath *fp, 3023 bool zero_stats) 3024 { 3025 unsigned long flags = 0; 3026 3027 /* PF driver will always initialize the Queue to an ACTIVE state */ 3028 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); 3029 3030 /* tx only connections collect statistics (on the same index as the 3031 * parent connection). The statistics are zeroed when the parent 3032 * connection is initialized. 
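	 * Hence BNX2X_Q_FLG_STATS is always set below, while
	 * BNX2X_Q_FLG_ZERO_STATS is added only when the caller requests
	 * zeroing through the zero_stats argument.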
3033 */ 3034 3035 __set_bit(BNX2X_Q_FLG_STATS, &flags); 3036 if (zero_stats) 3037 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 3038 3039 if (bp->flags & TX_SWITCHING) 3040 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags); 3041 3042 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); 3043 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); 3044 3045 #ifdef BNX2X_STOP_ON_ERROR 3046 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); 3047 #endif 3048 3049 return flags; 3050 } 3051 3052 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 3053 struct bnx2x_fastpath *fp, 3054 bool leading) 3055 { 3056 unsigned long flags = 0; 3057 3058 /* calculate other queue flags */ 3059 if (IS_MF_SD(bp)) 3060 __set_bit(BNX2X_Q_FLG_OV, &flags); 3061 3062 if (IS_FCOE_FP(fp)) { 3063 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 3064 /* For FCoE - force usage of default priority (for afex) */ 3065 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); 3066 } 3067 3068 if (!fp->disable_tpa) { 3069 __set_bit(BNX2X_Q_FLG_TPA, &flags); 3070 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 3071 if (fp->mode == TPA_MODE_GRO) 3072 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); 3073 } 3074 3075 if (leading) { 3076 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); 3077 __set_bit(BNX2X_Q_FLG_MCAST, &flags); 3078 } 3079 3080 /* Always set HW VLAN stripping */ 3081 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 3082 3083 /* configure silent vlan removal */ 3084 if (IS_MF_AFEX(bp)) 3085 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); 3086 3087 return flags | bnx2x_get_common_flags(bp, fp, true); 3088 } 3089 3090 static void bnx2x_pf_q_prep_general(struct bnx2x *bp, 3091 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, 3092 u8 cos) 3093 { 3094 gen_init->stat_id = bnx2x_stats_id(fp); 3095 gen_init->spcl_id = fp->cl_id; 3096 3097 /* Always use mini-jumbo MTU for FCoE L2 ring */ 3098 if (IS_FCOE_FP(fp)) 3099 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 3100 else 3101 gen_init->mtu = bp->dev->mtu; 3102 3103 gen_init->cos = cos; 3104 } 3105 3106 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, 3107 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, 3108 struct bnx2x_rxq_setup_params *rxq_init) 3109 { 3110 u8 max_sge = 0; 3111 u16 sge_sz = 0; 3112 u16 tpa_agg_size = 0; 3113 3114 if (!fp->disable_tpa) { 3115 pause->sge_th_lo = SGE_TH_LO(bp); 3116 pause->sge_th_hi = SGE_TH_HI(bp); 3117 3118 /* validate SGE ring has enough to cross high threshold */ 3119 WARN_ON(bp->dropless_fc && 3120 pause->sge_th_hi + FW_PREFETCH_CNT > 3121 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 3122 3123 tpa_agg_size = TPA_AGG_SIZE; 3124 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> 3125 SGE_PAGE_SHIFT; 3126 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 3127 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; 3128 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff); 3129 } 3130 3131 /* pause - not for e1 */ 3132 if (!CHIP_IS_E1(bp)) { 3133 pause->bd_th_lo = BD_TH_LO(bp); 3134 pause->bd_th_hi = BD_TH_HI(bp); 3135 3136 pause->rcq_th_lo = RCQ_TH_LO(bp); 3137 pause->rcq_th_hi = RCQ_TH_HI(bp); 3138 /* 3139 * validate that rings have enough entries to cross 3140 * high thresholds 3141 */ 3142 WARN_ON(bp->dropless_fc && 3143 pause->bd_th_hi + FW_PREFETCH_CNT > 3144 bp->rx_ring_size); 3145 WARN_ON(bp->dropless_fc && 3146 pause->rcq_th_hi + FW_PREFETCH_CNT > 3147 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 3148 3149 pause->pri_map = 1; 3150 } 3151 3152 /* rxq setup */ 3153 rxq_init->dscr_map = fp->rx_desc_mapping; 3154 rxq_init->sge_map = fp->rx_sge_mapping; 3155 rxq_init->rcq_map = fp->rx_comp_mapping; 3156 rxq_init->rcq_np_map = 
fp->rx_comp_mapping + BCM_PAGE_SIZE; 3157 3158 /* This should be a maximum number of data bytes that may be 3159 * placed on the BD (not including paddings). 3160 */ 3161 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - 3162 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; 3163 3164 rxq_init->cl_qzone_id = fp->cl_qzone_id; 3165 rxq_init->tpa_agg_sz = tpa_agg_size; 3166 rxq_init->sge_buf_sz = sge_sz; 3167 rxq_init->max_sges_pkt = max_sge; 3168 rxq_init->rss_engine_id = BP_FUNC(bp); 3169 rxq_init->mcast_engine_id = BP_FUNC(bp); 3170 3171 /* Maximum number or simultaneous TPA aggregation for this Queue. 3172 * 3173 * For PF Clients it should be the maximum available number. 3174 * VF driver(s) may want to define it to a smaller value. 3175 */ 3176 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); 3177 3178 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 3179 rxq_init->fw_sb_id = fp->fw_sb_id; 3180 3181 if (IS_FCOE_FP(fp)) 3182 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; 3183 else 3184 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 3185 /* configure silent vlan removal 3186 * if multi function mode is afex, then mask default vlan 3187 */ 3188 if (IS_MF_AFEX(bp)) { 3189 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; 3190 rxq_init->silent_removal_mask = VLAN_VID_MASK; 3191 } 3192 } 3193 3194 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, 3195 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, 3196 u8 cos) 3197 { 3198 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; 3199 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 3200 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 3201 txq_init->fw_sb_id = fp->fw_sb_id; 3202 3203 /* 3204 * set the tss leading client id for TX classification == 3205 * leading RSS client id 3206 */ 3207 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); 3208 3209 if (IS_FCOE_FP(fp)) { 3210 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; 3211 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; 3212 } 3213 } 3214 3215 static void bnx2x_pf_init(struct bnx2x *bp) 3216 { 3217 struct bnx2x_func_init_params func_init = {0}; 3218 struct event_ring_data eq_data = { {0} }; 3219 u16 flags; 3220 3221 if (!CHIP_IS_E1x(bp)) { 3222 /* reset IGU PF statistics: MSIX + ATTN */ 3223 /* PF */ 3224 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 3225 BNX2X_IGU_STAS_MSG_VF_CNT*4 + 3226 (CHIP_MODE_IS_4_PORT(bp) ? 3227 BP_FUNC(bp) : BP_VN(bp))*4, 0); 3228 /* ATTN */ 3229 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 3230 BNX2X_IGU_STAS_MSG_VF_CNT*4 + 3231 BNX2X_IGU_STAS_MSG_PF_CNT*4 + 3232 (CHIP_MODE_IS_4_PORT(bp) ? 3233 BP_FUNC(bp) : BP_VN(bp))*4, 0); 3234 } 3235 3236 /* function setup flags */ 3237 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 3238 3239 /* This flag is relevant for E1x only. 3240 * E2 doesn't have a TPA configuration in a function level. 3241 */ 3242 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; 3243 3244 func_init.func_flgs = flags; 3245 func_init.pf_id = BP_FUNC(bp); 3246 func_init.func_id = BP_FUNC(bp); 3247 func_init.spq_map = bp->spq_mapping; 3248 func_init.spq_prod = bp->spq_prod_idx; 3249 3250 bnx2x_func_init(bp, &func_init); 3251 3252 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); 3253 3254 /* 3255 * Congestion management values depend on the link rate 3256 * There is no active link so initial link rate is set to 10 Gbps. 3257 * When the link comes up The congestion management values are 3258 * re-calculated according to the actual link rate. 
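	 * (bnx2x_link_attn() -> bnx2x_set_local_cmng() redoes this
	 * calculation once the real bp->link_vars.line_speed is known).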
3259 */ 3260 bp->link_vars.line_speed = SPEED_10000; 3261 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); 3262 3263 /* Only the PMF sets the HW */ 3264 if (bp->port.pmf) 3265 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3266 3267 /* init Event Queue - PCI bus guarantees correct endianity*/ 3268 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); 3269 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); 3270 eq_data.producer = bp->eq_prod; 3271 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 3272 eq_data.sb_id = DEF_SB_ID; 3273 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); 3274 } 3275 3276 static void bnx2x_e1h_disable(struct bnx2x *bp) 3277 { 3278 int port = BP_PORT(bp); 3279 3280 bnx2x_tx_disable(bp); 3281 3282 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 3283 } 3284 3285 static void bnx2x_e1h_enable(struct bnx2x *bp) 3286 { 3287 int port = BP_PORT(bp); 3288 3289 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 3290 3291 /* Tx queue should be only re-enabled */ 3292 netif_tx_wake_all_queues(bp->dev); 3293 3294 /* 3295 * Should not call netif_carrier_on since it will be called if the link 3296 * is up when checking for link state 3297 */ 3298 } 3299 3300 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3301 3302 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) 3303 { 3304 struct eth_stats_info *ether_stat = 3305 &bp->slowpath->drv_info_to_mcp.ether_stat; 3306 struct bnx2x_vlan_mac_obj *mac_obj = 3307 &bp->sp_objs->mac_obj; 3308 int i; 3309 3310 strlcpy(ether_stat->version, DRV_MODULE_VERSION, 3311 ETH_STAT_INFO_VERSION_LEN); 3312 3313 /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the 3314 * mac_local field in ether_stat struct. The base address is offset by 2 3315 * bytes to account for the field being 8 bytes but a mac address is 3316 * only 6 bytes. Likewise, the stride for the get_n_elements function is 3317 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes 3318 * allocated by the ether_stat struct, so the macs will land in their 3319 * proper positions. 3320 */ 3321 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) 3322 memset(ether_stat->mac_local + i, 0, 3323 sizeof(ether_stat->mac_local[0])); 3324 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, 3325 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3326 ether_stat->mac_local + MAC_PAD, MAC_PAD, 3327 ETH_ALEN); 3328 ether_stat->mtu_size = bp->dev->mtu; 3329 if (bp->dev->features & NETIF_F_RXCSUM) 3330 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3331 if (bp->dev->features & NETIF_F_TSO) 3332 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 3333 ether_stat->feature_flags |= bp->common.boot_mode; 3334 3335 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; 3336 3337 ether_stat->txq_size = bp->tx_ring_size; 3338 ether_stat->rxq_size = bp->rx_ring_size; 3339 3340 #ifdef CONFIG_BNX2X_SRIOV 3341 ether_stat->vf_cnt = IS_SRIOV(bp) ? 
bp->vfdb->sriov.nr_virtfn : 0; 3342 #endif 3343 } 3344 3345 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3346 { 3347 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3348 struct fcoe_stats_info *fcoe_stat = 3349 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3350 3351 if (!CNIC_LOADED(bp)) 3352 return; 3353 3354 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); 3355 3356 fcoe_stat->qos_priority = 3357 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3358 3359 /* insert FCoE stats from ramrod response */ 3360 if (!NO_FCOE(bp)) { 3361 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3362 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3363 tstorm_queue_statistics; 3364 3365 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3366 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3367 xstorm_queue_statistics; 3368 3369 struct fcoe_statistics_params *fw_fcoe_stat = 3370 &bp->fw_stats_data->fcoe; 3371 3372 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, 3373 fcoe_stat->rx_bytes_lo, 3374 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 3375 3376 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3377 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 3378 fcoe_stat->rx_bytes_lo, 3379 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 3380 3381 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3382 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 3383 fcoe_stat->rx_bytes_lo, 3384 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 3385 3386 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3387 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 3388 fcoe_stat->rx_bytes_lo, 3389 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 3390 3391 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3392 fcoe_stat->rx_frames_lo, 3393 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 3394 3395 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3396 fcoe_stat->rx_frames_lo, 3397 fcoe_q_tstorm_stats->rcv_ucast_pkts); 3398 3399 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3400 fcoe_stat->rx_frames_lo, 3401 fcoe_q_tstorm_stats->rcv_bcast_pkts); 3402 3403 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3404 fcoe_stat->rx_frames_lo, 3405 fcoe_q_tstorm_stats->rcv_mcast_pkts); 3406 3407 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, 3408 fcoe_stat->tx_bytes_lo, 3409 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); 3410 3411 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3412 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, 3413 fcoe_stat->tx_bytes_lo, 3414 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); 3415 3416 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3417 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, 3418 fcoe_stat->tx_bytes_lo, 3419 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); 3420 3421 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3422 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, 3423 fcoe_stat->tx_bytes_lo, 3424 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); 3425 3426 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3427 fcoe_stat->tx_frames_lo, 3428 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); 3429 3430 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3431 fcoe_stat->tx_frames_lo, 3432 fcoe_q_xstorm_stats->ucast_pkts_sent); 3433 3434 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3435 fcoe_stat->tx_frames_lo, 3436 fcoe_q_xstorm_stats->bcast_pkts_sent); 3437 3438 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3439 fcoe_stat->tx_frames_lo, 3440 fcoe_q_xstorm_stats->mcast_pkts_sent); 3441 } 3442 3443 /* ask L5 driver to add data to the struct */ 3444 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3445 } 3446 3447 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3448 { 3449 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3450 struct iscsi_stats_info *iscsi_stat = 3451 
&bp->slowpath->drv_info_to_mcp.iscsi_stat; 3452 3453 if (!CNIC_LOADED(bp)) 3454 return; 3455 3456 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, 3457 ETH_ALEN); 3458 3459 iscsi_stat->qos_priority = 3460 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3461 3462 /* ask L5 driver to add data to the struct */ 3463 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3464 } 3465 3466 /* called due to MCP event (on pmf): 3467 * reread new bandwidth configuration 3468 * configure FW 3469 * notify others function about the change 3470 */ 3471 static void bnx2x_config_mf_bw(struct bnx2x *bp) 3472 { 3473 if (bp->link_vars.link_up) { 3474 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); 3475 bnx2x_link_sync_notify(bp); 3476 } 3477 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3478 } 3479 3480 static void bnx2x_set_mf_bw(struct bnx2x *bp) 3481 { 3482 bnx2x_config_mf_bw(bp); 3483 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3484 } 3485 3486 static void bnx2x_handle_eee_event(struct bnx2x *bp) 3487 { 3488 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); 3489 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3490 } 3491 3492 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) 3493 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) 3494 3495 static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3496 { 3497 enum drv_info_opcode op_code; 3498 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3499 bool release = false; 3500 int wait; 3501 3502 /* if drv_info version supported by MFW doesn't match - send NACK */ 3503 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3504 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3505 return; 3506 } 3507 3508 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3509 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3510 3511 /* Must prevent other flows from accessing drv_info_to_mcp */ 3512 mutex_lock(&bp->drv_info_mutex); 3513 3514 memset(&bp->slowpath->drv_info_to_mcp, 0, 3515 sizeof(union drv_info_to_mcp)); 3516 3517 switch (op_code) { 3518 case ETH_STATS_OPCODE: 3519 bnx2x_drv_info_ether_stat(bp); 3520 break; 3521 case FCOE_STATS_OPCODE: 3522 bnx2x_drv_info_fcoe_stat(bp); 3523 break; 3524 case ISCSI_STATS_OPCODE: 3525 bnx2x_drv_info_iscsi_stat(bp); 3526 break; 3527 default: 3528 /* if op code isn't supported - send NACK */ 3529 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3530 goto out; 3531 } 3532 3533 /* if we got drv_info attn from MFW then these fields are defined in 3534 * shmem2 for sure 3535 */ 3536 SHMEM2_WR(bp, drv_info_host_addr_lo, 3537 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3538 SHMEM2_WR(bp, drv_info_host_addr_hi, 3539 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3540 3541 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3542 3543 /* Since possible management wants both this and get_driver_version 3544 * need to wait until management notifies us it finished utilizing 3545 * the buffer. 
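	 * The loop below polls mfw_drv_indication for up to
	 * BNX2X_UPDATE_DRV_INFO_IND_COUNT iterations of
	 * BNX2X_UPDATE_DRV_INFO_IND_LENGTH ms each (25 * 20 ms = 500 ms);
	 * if the MFW never signals completion we keep ownership by setting
	 * drv_info_mng_owner, which also blocks bnx2x_update_mng_version().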
3546 */ 3547 if (!SHMEM2_HAS(bp, mfw_drv_indication)) { 3548 DP(BNX2X_MSG_MCP, "Management does not support indication\n"); 3549 } else if (!bp->drv_info_mng_owner) { 3550 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); 3551 3552 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { 3553 u32 indication = SHMEM2_RD(bp, mfw_drv_indication); 3554 3555 /* Management is done; need to clear indication */ 3556 if (indication & bit) { 3557 SHMEM2_WR(bp, mfw_drv_indication, 3558 indication & ~bit); 3559 release = true; 3560 break; 3561 } 3562 3563 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); 3564 } 3565 } 3566 if (!release) { 3567 DP(BNX2X_MSG_MCP, "Management did not release indication\n"); 3568 bp->drv_info_mng_owner = true; 3569 } 3570 3571 out: 3572 mutex_unlock(&bp->drv_info_mutex); 3573 } 3574 3575 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) 3576 { 3577 u8 vals[4]; 3578 int i = 0; 3579 3580 if (bnx2x_format) { 3581 i = sscanf(version, "1.%c%hhd.%hhd.%hhd", 3582 &vals[0], &vals[1], &vals[2], &vals[3]); 3583 if (i > 0) 3584 vals[0] -= '0'; 3585 } else { 3586 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", 3587 &vals[0], &vals[1], &vals[2], &vals[3]); 3588 } 3589 3590 while (i < 4) 3591 vals[i++] = 0; 3592 3593 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; 3594 } 3595 3596 void bnx2x_update_mng_version(struct bnx2x *bp) 3597 { 3598 u32 iscsiver = DRV_VER_NOT_LOADED; 3599 u32 fcoever = DRV_VER_NOT_LOADED; 3600 u32 ethver = DRV_VER_NOT_LOADED; 3601 int idx = BP_FW_MB_IDX(bp); 3602 u8 *version; 3603 3604 if (!SHMEM2_HAS(bp, func_os_drv_ver)) 3605 return; 3606 3607 mutex_lock(&bp->drv_info_mutex); 3608 /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ 3609 if (bp->drv_info_mng_owner) 3610 goto out; 3611 3612 if (bp->state != BNX2X_STATE_OPEN) 3613 goto out; 3614 3615 /* Parse ethernet driver version */ 3616 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); 3617 if (!CNIC_LOADED(bp)) 3618 goto out; 3619 3620 /* Try getting storage driver version via cnic */ 3621 memset(&bp->slowpath->drv_info_to_mcp, 0, 3622 sizeof(union drv_info_to_mcp)); 3623 bnx2x_drv_info_iscsi_stat(bp); 3624 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; 3625 iscsiver = bnx2x_update_mng_version_utility(version, false); 3626 3627 memset(&bp->slowpath->drv_info_to_mcp, 0, 3628 sizeof(union drv_info_to_mcp)); 3629 bnx2x_drv_info_fcoe_stat(bp); 3630 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; 3631 fcoever = bnx2x_update_mng_version_utility(version, false); 3632 3633 out: 3634 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); 3635 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); 3636 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); 3637 3638 mutex_unlock(&bp->drv_info_mutex); 3639 3640 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", 3641 ethver, iscsiver, fcoever); 3642 } 3643 3644 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 3645 { 3646 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 3647 3648 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3649 3650 /* 3651 * This is the only place besides the function initialization 3652 * where the bp->flags can change so it is done without any 3653 * locks 3654 */ 3655 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3656 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3657 bp->flags |= MF_FUNC_DIS; 3658 3659 
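			/* Stop the Tx queues and close the NIG LLH for this
			 * port; bnx2x_e1h_enable() below is the mirror
			 * operation for when the MCP re-enables the function.
			 */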
bnx2x_e1h_disable(bp); 3660 } else { 3661 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3662 bp->flags &= ~MF_FUNC_DIS; 3663 3664 bnx2x_e1h_enable(bp); 3665 } 3666 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3667 } 3668 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3669 bnx2x_config_mf_bw(bp); 3670 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3671 } 3672 3673 /* Report results to MCP */ 3674 if (dcc_event) 3675 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); 3676 else 3677 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); 3678 } 3679 3680 /* must be called under the spq lock */ 3681 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3682 { 3683 struct eth_spe *next_spe = bp->spq_prod_bd; 3684 3685 if (bp->spq_prod_bd == bp->spq_last_bd) { 3686 bp->spq_prod_bd = bp->spq; 3687 bp->spq_prod_idx = 0; 3688 DP(BNX2X_MSG_SP, "end of spq\n"); 3689 } else { 3690 bp->spq_prod_bd++; 3691 bp->spq_prod_idx++; 3692 } 3693 return next_spe; 3694 } 3695 3696 /* must be called under the spq lock */ 3697 static void bnx2x_sp_prod_update(struct bnx2x *bp) 3698 { 3699 int func = BP_FUNC(bp); 3700 3701 /* 3702 * Make sure that BD data is updated before writing the producer: 3703 * BD data is written to the memory, the producer is read from the 3704 * memory, thus we need a full memory barrier to ensure the ordering. 3705 */ 3706 mb(); 3707 3708 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 3709 bp->spq_prod_idx); 3710 mmiowb(); 3711 } 3712 3713 /** 3714 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ 3715 * 3716 * @cmd: command to check 3717 * @cmd_type: command type 3718 */ 3719 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 3720 { 3721 if ((cmd_type == NONE_CONNECTION_TYPE) || 3722 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 3723 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 3724 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 3725 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 3726 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 3727 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) 3728 return true; 3729 else 3730 return false; 3731 } 3732 3733 /** 3734 * bnx2x_sp_post - place a single command on an SP ring 3735 * 3736 * @bp: driver handle 3737 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 3738 * @cid: SW CID the command is related to 3739 * @data_hi: command private data address (high 32 bits) 3740 * @data_lo: command private data address (low 32 bits) 3741 * @cmd_type: command type (e.g. NONE, ETH) 3742 * 3743 * SP data is handled as if it's always an address pair, thus data fields are 3744 * not swapped to little endian in upper functions. Instead this function swaps 3745 * data as if it's two u32 fields. 3746 */ 3747 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 3748 u32 data_hi, u32 data_lo, int cmd_type) 3749 { 3750 struct eth_spe *spe; 3751 u16 type; 3752 bool common = bnx2x_is_contextless_ramrod(command, cmd_type); 3753 3754 #ifdef BNX2X_STOP_ON_ERROR 3755 if (unlikely(bp->panic)) { 3756 BNX2X_ERR("Can't post SP when there is panic\n"); 3757 return -EIO; 3758 } 3759 #endif 3760 3761 spin_lock_bh(&bp->spq_lock); 3762 3763 if (common) { 3764 if (!atomic_read(&bp->eq_spq_left)) { 3765 BNX2X_ERR("BUG! EQ ring full!\n"); 3766 spin_unlock_bh(&bp->spq_lock); 3767 bnx2x_panic(); 3768 return -EBUSY; 3769 } 3770 } else if (!atomic_read(&bp->cq_spq_left)) { 3771 BNX2X_ERR("BUG! 
SPQ ring full!\n"); 3772 spin_unlock_bh(&bp->spq_lock); 3773 bnx2x_panic(); 3774 return -EBUSY; 3775 } 3776 3777 spe = bnx2x_sp_get_next(bp); 3778 3779 /* CID needs port number to be encoded into it */ 3780 spe->hdr.conn_and_cmd_data = 3781 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 3782 HW_CID(bp, cid)); 3783 3784 /* In some cases, type may already contain the func-id 3785 * mainly in SRIOV related use cases, so we add it here only 3786 * if it's not already set. 3787 */ 3788 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { 3789 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & 3790 SPE_HDR_CONN_TYPE; 3791 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & 3792 SPE_HDR_FUNCTION_ID); 3793 } else { 3794 type = cmd_type; 3795 } 3796 3797 spe->hdr.type = cpu_to_le16(type); 3798 3799 spe->data.update_data_addr.hi = cpu_to_le32(data_hi); 3800 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 3801 3802 /* 3803 * It's ok if the actual decrement is issued towards the memory 3804 * somewhere between the spin_lock and spin_unlock. Thus no 3805 * more explicit memory barrier is needed. 3806 */ 3807 if (common) 3808 atomic_dec(&bp->eq_spq_left); 3809 else 3810 atomic_dec(&bp->cq_spq_left); 3811 3812 DP(BNX2X_MSG_SP, 3813 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", 3814 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 3815 (u32)(U64_LO(bp->spq_mapping) + 3816 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, 3817 HW_CID(bp, cid), data_hi, data_lo, type, 3818 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); 3819 3820 bnx2x_sp_prod_update(bp); 3821 spin_unlock_bh(&bp->spq_lock); 3822 return 0; 3823 } 3824 3825 /* acquire split MCP access lock register */ 3826 static int bnx2x_acquire_alr(struct bnx2x *bp) 3827 { 3828 u32 j, val; 3829 int rc = 0; 3830 3831 might_sleep(); 3832 for (j = 0; j < 1000; j++) { 3833 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); 3834 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); 3835 if (val & MCPR_ACCESS_LOCK_LOCK) 3836 break; 3837 3838 usleep_range(5000, 10000); 3839 } 3840 if (!(val & MCPR_ACCESS_LOCK_LOCK)) { 3841 BNX2X_ERR("Cannot acquire MCP access lock register\n"); 3842 rc = -EBUSY; 3843 } 3844 3845 return rc; 3846 } 3847 3848 /* release split MCP access lock register */ 3849 static void bnx2x_release_alr(struct bnx2x *bp) 3850 { 3851 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 3852 } 3853 3854 #define BNX2X_DEF_SB_ATT_IDX 0x0001 3855 #define BNX2X_DEF_SB_IDX 0x0002 3856 3857 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 3858 { 3859 struct host_sp_status_block *def_sb = bp->def_status_blk; 3860 u16 rc = 0; 3861 3862 barrier(); /* status block is written to by the chip */ 3863 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 3864 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 3865 rc |= BNX2X_DEF_SB_ATT_IDX; 3866 } 3867 3868 if (bp->def_idx != def_sb->sp_sb.running_index) { 3869 bp->def_idx = def_sb->sp_sb.running_index; 3870 rc |= BNX2X_DEF_SB_IDX; 3871 } 3872 3873 /* Do not reorder: indices reading should complete before handling */ 3874 barrier(); 3875 return rc; 3876 } 3877 3878 /* 3879 * slow path service functions 3880 */ 3881 3882 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 3883 { 3884 int port = BP_PORT(bp); 3885 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 3886 MISC_REG_AEU_MASK_ATTN_FUNC_0; 3887 u32 nig_int_mask_addr = port ?
NIG_REG_MASK_INTERRUPT_PORT1 : 3888 NIG_REG_MASK_INTERRUPT_PORT0; 3889 u32 aeu_mask; 3890 u32 nig_mask = 0; 3891 u32 reg_addr; 3892 3893 if (bp->attn_state & asserted) 3894 BNX2X_ERR("IGU ERROR\n"); 3895 3896 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3897 aeu_mask = REG_RD(bp, aeu_addr); 3898 3899 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 3900 aeu_mask, asserted); 3901 aeu_mask &= ~(asserted & 0x3ff); 3902 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 3903 3904 REG_WR(bp, aeu_addr, aeu_mask); 3905 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3906 3907 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 3908 bp->attn_state |= asserted; 3909 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 3910 3911 if (asserted & ATTN_HARD_WIRED_MASK) { 3912 if (asserted & ATTN_NIG_FOR_FUNC) { 3913 3914 bnx2x_acquire_phy_lock(bp); 3915 3916 /* save nig interrupt mask */ 3917 nig_mask = REG_RD(bp, nig_int_mask_addr); 3918 3919 /* If nig_mask is not set, no need to call the update 3920 * function. 3921 */ 3922 if (nig_mask) { 3923 REG_WR(bp, nig_int_mask_addr, 0); 3924 3925 bnx2x_link_attn(bp); 3926 } 3927 3928 /* handle unicore attn? */ 3929 } 3930 if (asserted & ATTN_SW_TIMER_4_FUNC) 3931 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); 3932 3933 if (asserted & GPIO_2_FUNC) 3934 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); 3935 3936 if (asserted & GPIO_3_FUNC) 3937 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); 3938 3939 if (asserted & GPIO_4_FUNC) 3940 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); 3941 3942 if (port == 0) { 3943 if (asserted & ATTN_GENERAL_ATTN_1) { 3944 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); 3945 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 3946 } 3947 if (asserted & ATTN_GENERAL_ATTN_2) { 3948 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); 3949 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 3950 } 3951 if (asserted & ATTN_GENERAL_ATTN_3) { 3952 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); 3953 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 3954 } 3955 } else { 3956 if (asserted & ATTN_GENERAL_ATTN_4) { 3957 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); 3958 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 3959 } 3960 if (asserted & ATTN_GENERAL_ATTN_5) { 3961 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); 3962 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 3963 } 3964 if (asserted & ATTN_GENERAL_ATTN_6) { 3965 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); 3966 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 3967 } 3968 } 3969 3970 } /* if hardwired */ 3971 3972 if (bp->common.int_block == INT_BLOCK_HC) 3973 reg_addr = (HC_REG_COMMAND_REG + port*32 + 3974 COMMAND_REG_ATTN_BITS_SET); 3975 else 3976 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 3977 3978 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, 3979 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 3980 REG_WR(bp, reg_addr, asserted); 3981 3982 /* now set back the mask */ 3983 if (asserted & ATTN_NIG_FOR_FUNC) { 3984 /* Verify that IGU ack through BAR was written before restoring 3985 * NIG mask. This loop should exit after 2-3 iterations max. 
3986 */ 3987 if (bp->common.int_block != INT_BLOCK_HC) { 3988 u32 cnt = 0, igu_acked; 3989 do { 3990 igu_acked = REG_RD(bp, 3991 IGU_REG_ATTENTION_ACK_BITS); 3992 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 3993 (++cnt < MAX_IGU_ATTN_ACK_TO)); 3994 if (!igu_acked) 3995 DP(NETIF_MSG_HW, 3996 "Failed to verify IGU ack on time\n"); 3997 barrier(); 3998 } 3999 REG_WR(bp, nig_int_mask_addr, nig_mask); 4000 bnx2x_release_phy_lock(bp); 4001 } 4002 } 4003 4004 static void bnx2x_fan_failure(struct bnx2x *bp) 4005 { 4006 int port = BP_PORT(bp); 4007 u32 ext_phy_config; 4008 /* mark the failure */ 4009 ext_phy_config = 4010 SHMEM_RD(bp, 4011 dev_info.port_hw_config[port].external_phy_config); 4012 4013 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 4014 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 4015 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, 4016 ext_phy_config); 4017 4018 /* log the failure */ 4019 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" 4020 "Please contact OEM Support for assistance\n"); 4021 4022 /* Schedule device reset (unload) 4023 * This is due to some boards consuming sufficient power when driver is 4024 * up to overheat if fan fails. 4025 */ 4026 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); 4027 } 4028 4029 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 4030 { 4031 int port = BP_PORT(bp); 4032 int reg_offset; 4033 u32 val; 4034 4035 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4036 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4037 4038 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 4039 4040 val = REG_RD(bp, reg_offset); 4041 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 4042 REG_WR(bp, reg_offset, val); 4043 4044 BNX2X_ERR("SPIO5 hw attention\n"); 4045 4046 /* Fan failure attention */ 4047 bnx2x_hw_reset_phy(&bp->link_params); 4048 bnx2x_fan_failure(bp); 4049 } 4050 4051 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 4052 bnx2x_acquire_phy_lock(bp); 4053 bnx2x_handle_module_detect_int(&bp->link_params); 4054 bnx2x_release_phy_lock(bp); 4055 } 4056 4057 if (attn & HW_INTERRUT_ASSERT_SET_0) { 4058 4059 val = REG_RD(bp, reg_offset); 4060 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 4061 REG_WR(bp, reg_offset, val); 4062 4063 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 4064 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 4065 bnx2x_panic(); 4066 } 4067 } 4068 4069 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 4070 { 4071 u32 val; 4072 4073 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 4074 4075 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 4076 BNX2X_ERR("DB hw attention 0x%x\n", val); 4077 /* DORQ discard attention */ 4078 if (val & 0x2) 4079 BNX2X_ERR("FATAL error from DORQ\n"); 4080 } 4081 4082 if (attn & HW_INTERRUT_ASSERT_SET_1) { 4083 4084 int port = BP_PORT(bp); 4085 int reg_offset; 4086 4087 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 4088 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4089 4090 val = REG_RD(bp, reg_offset); 4091 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 4092 REG_WR(bp, reg_offset, val); 4093 4094 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 4095 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 4096 bnx2x_panic(); 4097 } 4098 } 4099 4100 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 4101 { 4102 u32 val; 4103 4104 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 4105 4106 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 4107 BNX2X_ERR("CFC hw attention 0x%x\n", val); 4108 /* CFC error attention */ 4109 if (val & 0x2) 4110 BNX2X_ERR("FATAL error from CFC\n"); 4111 } 4112 4113 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 4114 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 4115 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 4116 /* RQ_USDMDP_FIFO_OVERFLOW */ 4117 if (val & 0x18000) 4118 BNX2X_ERR("FATAL error from PXP\n"); 4119 4120 if (!CHIP_IS_E1x(bp)) { 4121 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 4122 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 4123 } 4124 } 4125 4126 if (attn & HW_INTERRUT_ASSERT_SET_2) { 4127 4128 int port = BP_PORT(bp); 4129 int reg_offset; 4130 4131 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 4132 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4133 4134 val = REG_RD(bp, reg_offset); 4135 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 4136 REG_WR(bp, reg_offset, val); 4137 4138 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 4139 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 4140 bnx2x_panic(); 4141 } 4142 } 4143 4144 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 4145 { 4146 u32 val; 4147 4148 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 4149 4150 if (attn & BNX2X_PMF_LINK_ASSERT) { 4151 int func = BP_FUNC(bp); 4152 4153 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 4154 bnx2x_read_mf_cfg(bp); 4155 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 4156 func_mf_config[BP_ABS_FUNC(bp)].config); 4157 val = SHMEM_RD(bp, 4158 func_mb[BP_FW_MB_IDX(bp)].drv_status); 4159 if (val & DRV_STATUS_DCC_EVENT_MASK) 4160 bnx2x_dcc_event(bp, 4161 (val & DRV_STATUS_DCC_EVENT_MASK)); 4162 4163 if (val & DRV_STATUS_SET_MF_BW) 4164 bnx2x_set_mf_bw(bp); 4165 4166 if (val & DRV_STATUS_DRV_INFO_REQ) 4167 bnx2x_handle_drv_info_req(bp); 4168 4169 if (val & DRV_STATUS_VF_DISABLED) 4170 bnx2x_schedule_iov_task(bp, 4171 BNX2X_IOV_HANDLE_FLR); 4172 4173 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 4174 bnx2x_pmf_update(bp); 4175 4176 if (bp->port.pmf && 4177 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 4178 bp->dcbx_enabled > 0) 4179 /* start dcbx state machine */ 4180 bnx2x_dcbx_set_params(bp, 4181 BNX2X_DCBX_STATE_NEG_RECEIVED); 4182 if (val & DRV_STATUS_AFEX_EVENT_MASK) 4183 bnx2x_handle_afex_cmd(bp, 4184 val & DRV_STATUS_AFEX_EVENT_MASK); 4185 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 4186 bnx2x_handle_eee_event(bp); 4187 if (bp->link_vars.periodic_flags & 4188 PERIODIC_FLAGS_LINK_EVENT) { 4189 /* sync with link */ 4190 bnx2x_acquire_phy_lock(bp); 4191 bp->link_vars.periodic_flags &= 4192 ~PERIODIC_FLAGS_LINK_EVENT; 4193 bnx2x_release_phy_lock(bp); 4194 if (IS_MF(bp)) 4195 bnx2x_link_sync_notify(bp); 4196 bnx2x_link_report(bp); 4197 } 4198 /* Always call it here: bnx2x_link_report() will 4199 * prevent the link indication duplication. 
4200 */ 4201 bnx2x__link_status_update(bp); 4202 } else if (attn & BNX2X_MC_ASSERT_BITS) { 4203 4204 BNX2X_ERR("MC assert!\n"); 4205 bnx2x_mc_assert(bp); 4206 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); 4207 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); 4208 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); 4209 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); 4210 bnx2x_panic(); 4211 4212 } else if (attn & BNX2X_MCP_ASSERT) { 4213 4214 BNX2X_ERR("MCP assert!\n"); 4215 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); 4216 bnx2x_fw_dump(bp); 4217 4218 } else 4219 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); 4220 } 4221 4222 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 4223 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 4224 if (attn & BNX2X_GRC_TIMEOUT) { 4225 val = CHIP_IS_E1(bp) ? 0 : 4226 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); 4227 BNX2X_ERR("GRC time-out 0x%08x\n", val); 4228 } 4229 if (attn & BNX2X_GRC_RSV) { 4230 val = CHIP_IS_E1(bp) ? 0 : 4231 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); 4232 BNX2X_ERR("GRC reserved 0x%08x\n", val); 4233 } 4234 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 4235 } 4236 } 4237 4238 /* 4239 * Bits map: 4240 * 0-7 - Engine0 load counter. 4241 * 8-15 - Engine1 load counter. 4242 * 16 - Engine0 RESET_IN_PROGRESS bit. 4243 * 17 - Engine1 RESET_IN_PROGRESS bit. 4244 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function 4245 * on the engine 4246 * 19 - Engine1 ONE_IS_LOADED. 4247 * 20 - Chip reset flow bit. When set, a non-leader must wait for the leaders 4248 * of both engines to complete (i.e. check both RESET_IN_PROGRESS bits, not 4249 * just the one belonging to its own engine). 4250 * 4251 */ 4252 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 4253 4254 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff 4255 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0 4256 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 4257 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8 4258 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 4259 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 4260 #define BNX2X_GLOBAL_RESET_BIT 0x00040000 4261 4262 /* 4263 * Set the GLOBAL_RESET bit. 4264 * 4265 * Should be run under rtnl lock 4266 */ 4267 void bnx2x_set_reset_global(struct bnx2x *bp) 4268 { 4269 u32 val; 4270 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4271 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4272 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 4273 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4274 } 4275 4276 /* 4277 * Clear the GLOBAL_RESET bit. 4278 * 4279 * Should be run under rtnl lock 4280 */ 4281 static void bnx2x_clear_reset_global(struct bnx2x *bp) 4282 { 4283 u32 val; 4284 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4285 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4286 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 4287 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4288 } 4289 4290 /* 4291 * Checks the GLOBAL_RESET bit. 4292 * 4293 * should be run under rtnl lock 4294 */ 4295 static bool bnx2x_reset_is_global(struct bnx2x *bp) 4296 { 4297 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4298 4299 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 4300 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; 4301 } 4302 4303 /* 4304 * Clear RESET_IN_PROGRESS bit for the current engine. 4305 * 4306 * Should be run under rtnl lock 4307 */ 4308 static void bnx2x_set_reset_done(struct bnx2x *bp) 4309 { 4310 u32 val; 4311 u32 bit = BP_PATH(bp) ?
4312 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4313 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4314 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4315 4316 /* Clear the bit */ 4317 val &= ~bit; 4318 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4319 4320 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4321 } 4322 4323 /* 4324 * Set RESET_IN_PROGRESS for the current engine. 4325 * 4326 * should be run under rtnl lock 4327 */ 4328 void bnx2x_set_reset_in_progress(struct bnx2x *bp) 4329 { 4330 u32 val; 4331 u32 bit = BP_PATH(bp) ? 4332 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4333 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4334 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4335 4336 /* Set the bit */ 4337 val |= bit; 4338 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4339 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4340 } 4341 4342 /* 4343 * Checks the RESET_IN_PROGRESS bit for the given engine. 4344 * should be run under rtnl lock 4345 */ 4346 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) 4347 { 4348 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4349 u32 bit = engine ? 4350 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4351 4352 /* return false if bit is set */ 4353 return (val & bit) ? false : true; 4354 } 4355 4356 /* 4357 * set pf load for the current pf. 4358 * 4359 * should be run under rtnl lock 4360 */ 4361 void bnx2x_set_pf_load(struct bnx2x *bp) 4362 { 4363 u32 val1, val; 4364 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4365 BNX2X_PATH0_LOAD_CNT_MASK; 4366 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4367 BNX2X_PATH0_LOAD_CNT_SHIFT; 4368 4369 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4370 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4371 4372 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 4373 4374 /* get the current counter value */ 4375 val1 = (val & mask) >> shift; 4376 4377 /* set bit of that PF */ 4378 val1 |= (1 << bp->pf_num); 4379 4380 /* clear the old value */ 4381 val &= ~mask; 4382 4383 /* set the new one */ 4384 val |= ((val1 << shift) & mask); 4385 4386 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4387 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4388 } 4389 4390 /** 4391 * bnx2x_clear_pf_load - clear pf load mark 4392 * 4393 * @bp: driver handle 4394 * 4395 * Should be run under rtnl lock. 4396 * Decrements the load counter for the current engine. Returns 4397 * whether other functions are still loaded 4398 */ 4399 bool bnx2x_clear_pf_load(struct bnx2x *bp) 4400 { 4401 u32 val1, val; 4402 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4403 BNX2X_PATH0_LOAD_CNT_MASK; 4404 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4405 BNX2X_PATH0_LOAD_CNT_SHIFT; 4406 4407 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4408 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4409 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 4410 4411 /* get the current counter value */ 4412 val1 = (val & mask) >> shift; 4413 4414 /* clear bit of that PF */ 4415 val1 &= ~(1 << bp->pf_num); 4416 4417 /* clear the old value */ 4418 val &= ~mask; 4419 4420 /* set the new one */ 4421 val |= ((val1 << shift) & mask); 4422 4423 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4424 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4425 return val1 != 0; 4426 } 4427 4428 /* 4429 * Read the load status for the current engine. 
4430 * 4431 * should be run under rtnl lock 4432 */ 4433 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 4434 { 4435 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 4436 BNX2X_PATH0_LOAD_CNT_MASK); 4437 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4438 BNX2X_PATH0_LOAD_CNT_SHIFT); 4439 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4440 4441 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4442 4443 val = (val & mask) >> shift; 4444 4445 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4446 engine, val); 4447 4448 return val != 0; 4449 } 4450 4451 static void _print_parity(struct bnx2x *bp, u32 reg) 4452 { 4453 pr_cont(" [0x%08x] ", REG_RD(bp, reg)); 4454 } 4455 4456 static void _print_next_block(int idx, const char *blk) 4457 { 4458 pr_cont("%s%s", idx ? ", " : "", blk); 4459 } 4460 4461 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4462 int *par_num, bool print) 4463 { 4464 u32 cur_bit; 4465 bool res; 4466 int i; 4467 4468 res = false; 4469 4470 for (i = 0; sig; i++) { 4471 cur_bit = (0x1UL << i); 4472 if (sig & cur_bit) { 4473 res |= true; /* Each bit is real error! */ 4474 4475 if (print) { 4476 switch (cur_bit) { 4477 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4478 _print_next_block((*par_num)++, "BRB"); 4479 _print_parity(bp, 4480 BRB1_REG_BRB1_PRTY_STS); 4481 break; 4482 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4483 _print_next_block((*par_num)++, 4484 "PARSER"); 4485 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4486 break; 4487 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4488 _print_next_block((*par_num)++, "TSDM"); 4489 _print_parity(bp, 4490 TSDM_REG_TSDM_PRTY_STS); 4491 break; 4492 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4493 _print_next_block((*par_num)++, 4494 "SEARCHER"); 4495 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4496 break; 4497 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4498 _print_next_block((*par_num)++, "TCM"); 4499 _print_parity(bp, TCM_REG_TCM_PRTY_STS); 4500 break; 4501 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4502 _print_next_block((*par_num)++, 4503 "TSEMI"); 4504 _print_parity(bp, 4505 TSEM_REG_TSEM_PRTY_STS_0); 4506 _print_parity(bp, 4507 TSEM_REG_TSEM_PRTY_STS_1); 4508 break; 4509 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4510 _print_next_block((*par_num)++, "XPB"); 4511 _print_parity(bp, GRCBASE_XPB + 4512 PB_REG_PB_PRTY_STS); 4513 break; 4514 } 4515 } 4516 4517 /* Clear the bit */ 4518 sig &= ~cur_bit; 4519 } 4520 } 4521 4522 return res; 4523 } 4524 4525 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4526 int *par_num, bool *global, 4527 bool print) 4528 { 4529 u32 cur_bit; 4530 bool res; 4531 int i; 4532 4533 res = false; 4534 4535 for (i = 0; sig; i++) { 4536 cur_bit = (0x1UL << i); 4537 if (sig & cur_bit) { 4538 res |= true; /* Each bit is real error! 
*/ 4539 switch (cur_bit) { 4540 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4541 if (print) { 4542 _print_next_block((*par_num)++, "PBF"); 4543 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4544 } 4545 break; 4546 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4547 if (print) { 4548 _print_next_block((*par_num)++, "QM"); 4549 _print_parity(bp, QM_REG_QM_PRTY_STS); 4550 } 4551 break; 4552 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4553 if (print) { 4554 _print_next_block((*par_num)++, "TM"); 4555 _print_parity(bp, TM_REG_TM_PRTY_STS); 4556 } 4557 break; 4558 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4559 if (print) { 4560 _print_next_block((*par_num)++, "XSDM"); 4561 _print_parity(bp, 4562 XSDM_REG_XSDM_PRTY_STS); 4563 } 4564 break; 4565 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4566 if (print) { 4567 _print_next_block((*par_num)++, "XCM"); 4568 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4569 } 4570 break; 4571 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4572 if (print) { 4573 _print_next_block((*par_num)++, 4574 "XSEMI"); 4575 _print_parity(bp, 4576 XSEM_REG_XSEM_PRTY_STS_0); 4577 _print_parity(bp, 4578 XSEM_REG_XSEM_PRTY_STS_1); 4579 } 4580 break; 4581 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4582 if (print) { 4583 _print_next_block((*par_num)++, 4584 "DOORBELLQ"); 4585 _print_parity(bp, 4586 DORQ_REG_DORQ_PRTY_STS); 4587 } 4588 break; 4589 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4590 if (print) { 4591 _print_next_block((*par_num)++, "NIG"); 4592 if (CHIP_IS_E1x(bp)) { 4593 _print_parity(bp, 4594 NIG_REG_NIG_PRTY_STS); 4595 } else { 4596 _print_parity(bp, 4597 NIG_REG_NIG_PRTY_STS_0); 4598 _print_parity(bp, 4599 NIG_REG_NIG_PRTY_STS_1); 4600 } 4601 } 4602 break; 4603 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4604 if (print) 4605 _print_next_block((*par_num)++, 4606 "VAUX PCI CORE"); 4607 *global = true; 4608 break; 4609 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4610 if (print) { 4611 _print_next_block((*par_num)++, 4612 "DEBUG"); 4613 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4614 } 4615 break; 4616 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4617 if (print) { 4618 _print_next_block((*par_num)++, "USDM"); 4619 _print_parity(bp, 4620 USDM_REG_USDM_PRTY_STS); 4621 } 4622 break; 4623 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4624 if (print) { 4625 _print_next_block((*par_num)++, "UCM"); 4626 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4627 } 4628 break; 4629 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4630 if (print) { 4631 _print_next_block((*par_num)++, 4632 "USEMI"); 4633 _print_parity(bp, 4634 USEM_REG_USEM_PRTY_STS_0); 4635 _print_parity(bp, 4636 USEM_REG_USEM_PRTY_STS_1); 4637 } 4638 break; 4639 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4640 if (print) { 4641 _print_next_block((*par_num)++, "UPB"); 4642 _print_parity(bp, GRCBASE_UPB + 4643 PB_REG_PB_PRTY_STS); 4644 } 4645 break; 4646 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4647 if (print) { 4648 _print_next_block((*par_num)++, "CSDM"); 4649 _print_parity(bp, 4650 CSDM_REG_CSDM_PRTY_STS); 4651 } 4652 break; 4653 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4654 if (print) { 4655 _print_next_block((*par_num)++, "CCM"); 4656 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4657 } 4658 break; 4659 } 4660 4661 /* Clear the bit */ 4662 sig &= ~cur_bit; 4663 } 4664 } 4665 4666 return res; 4667 } 4668 4669 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4670 int *par_num, bool print) 4671 { 4672 u32 cur_bit; 4673 bool res; 4674 int i; 4675 4676 res = false; 4677 4678 for (i = 0; sig; i++) { 
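/* Walk the asserted parity vector bit by bit: each set bit identifies a
 * HW block whose parity status register(s) are dumped below (when printing
 * is requested) before the bit is cleared from the local copy of sig.
 */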
4679 cur_bit = (0x1UL << i); 4680 if (sig & cur_bit) { 4681 res |= true; /* Each bit is real error! */ 4682 if (print) { 4683 switch (cur_bit) { 4684 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4685 _print_next_block((*par_num)++, 4686 "CSEMI"); 4687 _print_parity(bp, 4688 CSEM_REG_CSEM_PRTY_STS_0); 4689 _print_parity(bp, 4690 CSEM_REG_CSEM_PRTY_STS_1); 4691 break; 4692 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4693 _print_next_block((*par_num)++, "PXP"); 4694 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4695 _print_parity(bp, 4696 PXP2_REG_PXP2_PRTY_STS_0); 4697 _print_parity(bp, 4698 PXP2_REG_PXP2_PRTY_STS_1); 4699 break; 4700 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4701 _print_next_block((*par_num)++, 4702 "PXPPCICLOCKCLIENT"); 4703 break; 4704 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4705 _print_next_block((*par_num)++, "CFC"); 4706 _print_parity(bp, 4707 CFC_REG_CFC_PRTY_STS); 4708 break; 4709 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4710 _print_next_block((*par_num)++, "CDU"); 4711 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4712 break; 4713 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4714 _print_next_block((*par_num)++, "DMAE"); 4715 _print_parity(bp, 4716 DMAE_REG_DMAE_PRTY_STS); 4717 break; 4718 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4719 _print_next_block((*par_num)++, "IGU"); 4720 if (CHIP_IS_E1x(bp)) 4721 _print_parity(bp, 4722 HC_REG_HC_PRTY_STS); 4723 else 4724 _print_parity(bp, 4725 IGU_REG_IGU_PRTY_STS); 4726 break; 4727 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4728 _print_next_block((*par_num)++, "MISC"); 4729 _print_parity(bp, 4730 MISC_REG_MISC_PRTY_STS); 4731 break; 4732 } 4733 } 4734 4735 /* Clear the bit */ 4736 sig &= ~cur_bit; 4737 } 4738 } 4739 4740 return res; 4741 } 4742 4743 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, 4744 int *par_num, bool *global, 4745 bool print) 4746 { 4747 bool res = false; 4748 u32 cur_bit; 4749 int i; 4750 4751 for (i = 0; sig; i++) { 4752 cur_bit = (0x1UL << i); 4753 if (sig & cur_bit) { 4754 switch (cur_bit) { 4755 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4756 if (print) 4757 _print_next_block((*par_num)++, 4758 "MCP ROM"); 4759 *global = true; 4760 res |= true; 4761 break; 4762 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4763 if (print) 4764 _print_next_block((*par_num)++, 4765 "MCP UMP RX"); 4766 *global = true; 4767 res |= true; 4768 break; 4769 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4770 if (print) 4771 _print_next_block((*par_num)++, 4772 "MCP UMP TX"); 4773 *global = true; 4774 res |= true; 4775 break; 4776 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4777 if (print) 4778 _print_next_block((*par_num)++, 4779 "MCP SCPAD"); 4780 /* clear latched SCPAD PARITY from MCP */ 4781 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 4782 1UL << 10); 4783 break; 4784 } 4785 4786 /* Clear the bit */ 4787 sig &= ~cur_bit; 4788 } 4789 } 4790 4791 return res; 4792 } 4793 4794 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4795 int *par_num, bool print) 4796 { 4797 u32 cur_bit; 4798 bool res; 4799 int i; 4800 4801 res = false; 4802 4803 for (i = 0; sig; i++) { 4804 cur_bit = (0x1UL << i); 4805 if (sig & cur_bit) { 4806 res |= true; /* Each bit is real error!
*/ 4807 if (print) { 4808 switch (cur_bit) { 4809 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4810 _print_next_block((*par_num)++, 4811 "PGLUE_B"); 4812 _print_parity(bp, 4813 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4814 break; 4815 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4816 _print_next_block((*par_num)++, "ATC"); 4817 _print_parity(bp, 4818 ATC_REG_ATC_PRTY_STS); 4819 break; 4820 } 4821 } 4822 /* Clear the bit */ 4823 sig &= ~cur_bit; 4824 } 4825 } 4826 4827 return res; 4828 } 4829 4830 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4831 u32 *sig) 4832 { 4833 bool res = false; 4834 4835 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4836 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4837 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4838 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4839 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4840 int par_num = 0; 4841 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4842 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4843 sig[0] & HW_PRTY_ASSERT_SET_0, 4844 sig[1] & HW_PRTY_ASSERT_SET_1, 4845 sig[2] & HW_PRTY_ASSERT_SET_2, 4846 sig[3] & HW_PRTY_ASSERT_SET_3, 4847 sig[4] & HW_PRTY_ASSERT_SET_4); 4848 if (print) 4849 netdev_err(bp->dev, 4850 "Parity errors detected in blocks: "); 4851 res |= bnx2x_check_blocks_with_parity0(bp, 4852 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); 4853 res |= bnx2x_check_blocks_with_parity1(bp, 4854 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); 4855 res |= bnx2x_check_blocks_with_parity2(bp, 4856 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); 4857 res |= bnx2x_check_blocks_with_parity3(bp, 4858 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); 4859 res |= bnx2x_check_blocks_with_parity4(bp, 4860 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); 4861 4862 if (print) 4863 pr_cont("\n"); 4864 } 4865 4866 return res; 4867 } 4868 4869 /** 4870 * bnx2x_chk_parity_attn - checks for parity attentions. 4871 * 4872 * @bp: driver handle 4873 * @global: true if there was a global attention 4874 * @print: show parity attention in syslog 4875 */ 4876 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 4877 { 4878 struct attn_route attn = { {0} }; 4879 int port = BP_PORT(bp); 4880 4881 attn.sig[0] = REG_RD(bp, 4882 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 4883 port*4); 4884 attn.sig[1] = REG_RD(bp, 4885 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 4886 port*4); 4887 attn.sig[2] = REG_RD(bp, 4888 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 4889 port*4); 4890 attn.sig[3] = REG_RD(bp, 4891 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4892 port*4); 4893 /* Since MCP attentions can't be disabled inside the block, we need to 4894 * read AEU registers to see whether they're currently disabled 4895 */ 4896 attn.sig[3] &= ((REG_RD(bp, 4897 !port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 4898 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & 4899 MISC_AEU_ENABLE_MCP_PRTY_BITS) | 4900 ~MISC_AEU_ENABLE_MCP_PRTY_BITS); 4901 4902 if (!CHIP_IS_E1x(bp)) 4903 attn.sig[4] = REG_RD(bp, 4904 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 4905 port*4); 4906 4907 return bnx2x_parity_attn(bp, global, print, attn.sig); 4908 } 4909 4910 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4911 { 4912 u32 val; 4913 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4914 4915 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 4916 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 4917 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 4918 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 4919 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 4920 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 4921 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 4922 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 4923 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 4924 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 4925 if (val & 4926 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 4927 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 4928 if (val & 4929 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 4930 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 4931 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 4932 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 4933 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 4934 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 4935 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 4936 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 4937 } 4938 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 4939 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 4940 BNX2X_ERR("ATC hw attention 0x%x\n", val); 4941 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 4942 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 4943 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 4944 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 4945 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 4946 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 4947 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 4948 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 4949 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 4950 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 4951 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 4952 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 4953 } 4954 4955 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4956 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 4957 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 4958 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4959 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 4960 } 4961 } 4962 4963 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4964 { 4965 struct attn_route attn, *group_mask; 4966 int port = BP_PORT(bp); 4967 int index; 4968 u32 reg_addr; 4969 u32 val; 4970 u32 aeu_mask; 4971 bool global = false; 4972 4973 /* need to take HW lock because MCP or other port might also 4974 try to handle this event */ 4975 bnx2x_acquire_alr(bp); 4976 4977 if (bnx2x_chk_parity_attn(bp, &global, true)) { 4978 #ifndef BNX2X_STOP_ON_ERROR 4979 bp->recovery_state = BNX2X_RECOVERY_INIT; 4980 
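/* Kick the sp_rtnl task; the actual recovery (driven by the
 * bp->recovery_state set above) is carried out there under rtnl_lock.
 */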
schedule_delayed_work(&bp->sp_rtnl_task, 0); 4981 /* Disable HW interrupts */ 4982 bnx2x_int_disable(bp); 4983 /* In case of parity errors don't handle attentions so that 4984 * other function would "see" parity errors. 4985 */ 4986 #else 4987 bnx2x_panic(); 4988 #endif 4989 bnx2x_release_alr(bp); 4990 return; 4991 } 4992 4993 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 4994 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 4995 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 4996 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 4997 if (!CHIP_IS_E1x(bp)) 4998 attn.sig[4] = 4999 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 5000 else 5001 attn.sig[4] = 0; 5002 5003 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 5004 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 5005 5006 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5007 if (deasserted & (1 << index)) { 5008 group_mask = &bp->attn_group[index]; 5009 5010 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 5011 index, 5012 group_mask->sig[0], group_mask->sig[1], 5013 group_mask->sig[2], group_mask->sig[3], 5014 group_mask->sig[4]); 5015 5016 bnx2x_attn_int_deasserted4(bp, 5017 attn.sig[4] & group_mask->sig[4]); 5018 bnx2x_attn_int_deasserted3(bp, 5019 attn.sig[3] & group_mask->sig[3]); 5020 bnx2x_attn_int_deasserted1(bp, 5021 attn.sig[1] & group_mask->sig[1]); 5022 bnx2x_attn_int_deasserted2(bp, 5023 attn.sig[2] & group_mask->sig[2]); 5024 bnx2x_attn_int_deasserted0(bp, 5025 attn.sig[0] & group_mask->sig[0]); 5026 } 5027 } 5028 5029 bnx2x_release_alr(bp); 5030 5031 if (bp->common.int_block == INT_BLOCK_HC) 5032 reg_addr = (HC_REG_COMMAND_REG + port*32 + 5033 COMMAND_REG_ATTN_BITS_CLR); 5034 else 5035 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 5036 5037 val = ~deasserted; 5038 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 5039 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 5040 REG_WR(bp, reg_addr, val); 5041 5042 if (~bp->attn_state & deasserted) 5043 BNX2X_ERR("IGU ERROR\n"); 5044 5045 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 5046 MISC_REG_AEU_MASK_ATTN_FUNC_0; 5047 5048 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 5049 aeu_mask = REG_RD(bp, reg_addr); 5050 5051 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 5052 aeu_mask, deasserted); 5053 aeu_mask |= (deasserted & 0x3ff); 5054 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 5055 5056 REG_WR(bp, reg_addr, aeu_mask); 5057 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 5058 5059 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 5060 bp->attn_state &= ~deasserted; 5061 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 5062 } 5063 5064 static void bnx2x_attn_int(struct bnx2x *bp) 5065 { 5066 /* read local copy of bits */ 5067 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 5068 attn_bits); 5069 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 
5070 attn_bits_ack); 5071 u32 attn_state = bp->attn_state; 5072 5073 /* look for changed bits */ 5074 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 5075 u32 deasserted = ~attn_bits & attn_ack & attn_state; 5076 5077 DP(NETIF_MSG_HW, 5078 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 5079 attn_bits, attn_ack, asserted, deasserted); 5080 5081 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 5082 BNX2X_ERR("BAD attention state\n"); 5083 5084 /* handle bits that were raised */ 5085 if (asserted) 5086 bnx2x_attn_int_asserted(bp, asserted); 5087 5088 if (deasserted) 5089 bnx2x_attn_int_deasserted(bp, deasserted); 5090 } 5091 5092 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 5093 u16 index, u8 op, u8 update) 5094 { 5095 u32 igu_addr = bp->igu_base_addr; 5096 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 5097 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 5098 igu_addr); 5099 } 5100 5101 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 5102 { 5103 /* No memory barriers */ 5104 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 5105 mmiowb(); /* keep prod updates ordered */ 5106 } 5107 5108 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 5109 union event_ring_elem *elem) 5110 { 5111 u8 err = elem->message.error; 5112 5113 if (!bp->cnic_eth_dev.starting_cid || 5114 (cid < bp->cnic_eth_dev.starting_cid && 5115 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 5116 return 1; 5117 5118 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 5119 5120 if (unlikely(err)) { 5121 5122 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 5123 cid); 5124 bnx2x_panic_dump(bp, false); 5125 } 5126 bnx2x_cnic_cfc_comp(bp, cid, err); 5127 return 0; 5128 } 5129 5130 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 5131 { 5132 struct bnx2x_mcast_ramrod_params rparam; 5133 int rc; 5134 5135 memset(&rparam, 0, sizeof(rparam)); 5136 5137 rparam.mcast_obj = &bp->mcast_obj; 5138 5139 netif_addr_lock_bh(bp->dev); 5140 5141 /* Clear pending state for the last command */ 5142 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 5143 5144 /* If there are pending mcast commands - send them */ 5145 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 5146 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 5147 if (rc < 0) 5148 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 5149 rc); 5150 } 5151 5152 netif_addr_unlock_bh(bp->dev); 5153 } 5154 5155 static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 5156 union event_ring_elem *elem) 5157 { 5158 unsigned long ramrod_flags = 0; 5159 int rc = 0; 5160 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 5161 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 5162 5163 /* Always push next commands out, don't wait here */ 5164 __set_bit(RAMROD_CONT, &ramrod_flags); 5165 5166 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) 5167 >> BNX2X_SWCID_SHIFT) { 5168 case BNX2X_FILTER_MAC_PENDING: 5169 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 5170 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) 5171 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 5172 else 5173 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 5174 5175 break; 5176 case BNX2X_FILTER_MCAST_PENDING: 5177 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 5178 /* This is only relevant for 57710 where multicast MACs are 5179 * configured as unicast MACs using the same ramrod. 
5180 */ 5181 bnx2x_handle_mcast_eqe(bp); 5182 return; 5183 default: 5184 BNX2X_ERR("Unsupported classification command: %d\n", 5185 elem->message.data.eth_event.echo); 5186 return; 5187 } 5188 5189 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 5190 5191 if (rc < 0) 5192 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 5193 else if (rc > 0) 5194 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 5195 } 5196 5197 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 5198 5199 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 5200 { 5201 netif_addr_lock_bh(bp->dev); 5202 5203 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 5204 5205 /* Send rx_mode command again if was requested */ 5206 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 5207 bnx2x_set_storm_rx_mode(bp); 5208 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 5209 &bp->sp_state)) 5210 bnx2x_set_iscsi_eth_rx_mode(bp, true); 5211 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 5212 &bp->sp_state)) 5213 bnx2x_set_iscsi_eth_rx_mode(bp, false); 5214 5215 netif_addr_unlock_bh(bp->dev); 5216 } 5217 5218 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 5219 union event_ring_elem *elem) 5220 { 5221 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 5222 DP(BNX2X_MSG_SP, 5223 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 5224 elem->message.data.vif_list_event.func_bit_map); 5225 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 5226 elem->message.data.vif_list_event.func_bit_map); 5227 } else if (elem->message.data.vif_list_event.echo == 5228 VIF_LIST_RULE_SET) { 5229 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 5230 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 5231 } 5232 } 5233 5234 /* called with rtnl_lock */ 5235 static void bnx2x_after_function_update(struct bnx2x *bp) 5236 { 5237 int q, rc; 5238 struct bnx2x_fastpath *fp; 5239 struct bnx2x_queue_state_params queue_params = {NULL}; 5240 struct bnx2x_queue_update_params *q_update_params = 5241 &queue_params.params.update; 5242 5243 /* Send Q update command with afex vlan removal values for all Qs */ 5244 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 5245 5246 /* set silent vlan removal values according to vlan mode */ 5247 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 5248 &q_update_params->update_flags); 5249 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 5250 &q_update_params->update_flags); 5251 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5252 5253 /* in access mode mark mask and value are 0 to strip all vlans */ 5254 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { 5255 q_update_params->silent_removal_value = 0; 5256 q_update_params->silent_removal_mask = 0; 5257 } else { 5258 q_update_params->silent_removal_value = 5259 (bp->afex_def_vlan_tag & VLAN_VID_MASK); 5260 q_update_params->silent_removal_mask = VLAN_VID_MASK; 5261 } 5262 5263 for_each_eth_queue(bp, q) { 5264 /* Set the appropriate Queue object */ 5265 fp = &bp->fp[q]; 5266 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5267 5268 /* send the ramrod */ 5269 rc = bnx2x_queue_state_change(bp, &queue_params); 5270 if (rc < 0) 5271 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5272 q); 5273 } 5274 5275 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { 5276 fp = &bp->fp[FCOE_IDX(bp)]; 5277 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5278 5279 /* clear pending completion bit */ 5280 __clear_bit(RAMROD_COMP_WAIT, 
&queue_params.ramrod_flags); 5281 5282 /* mark latest Q bit */ 5283 smp_mb__before_atomic(); 5284 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 5285 smp_mb__after_atomic(); 5286 5287 /* send Q update ramrod for FCoE Q */ 5288 rc = bnx2x_queue_state_change(bp, &queue_params); 5289 if (rc < 0) 5290 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5291 q); 5292 } else { 5293 /* If no FCoE ring - ACK MCP now */ 5294 bnx2x_link_report(bp); 5295 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5296 } 5297 } 5298 5299 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 5300 struct bnx2x *bp, u32 cid) 5301 { 5302 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 5303 5304 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp))) 5305 return &bnx2x_fcoe_sp_obj(bp, q_obj); 5306 else 5307 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; 5308 } 5309 5310 static void bnx2x_eq_int(struct bnx2x *bp) 5311 { 5312 u16 hw_cons, sw_cons, sw_prod; 5313 union event_ring_elem *elem; 5314 u8 echo; 5315 u32 cid; 5316 u8 opcode; 5317 int rc, spqe_cnt = 0; 5318 struct bnx2x_queue_sp_obj *q_obj; 5319 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; 5320 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; 5321 5322 hw_cons = le16_to_cpu(*bp->eq_cons_sb); 5323 5324 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 5325 * When we get the next-page we need to adjust so the loop 5326 * condition below will be met. The next-page element is the size of a 5327 * regular element, hence the increment by 1. 5328 */ 5329 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) 5330 hw_cons++; 5331 5332 /* This function may never run in parallel with itself for a 5333 * specific bp, thus there is no need for a "paired" read memory 5334 * barrier here.
5335 */ 5336 sw_cons = bp->eq_cons; 5337 sw_prod = bp->eq_prod; 5338 5339 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", 5340 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); 5341 5342 for (; sw_cons != hw_cons; 5343 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 5344 5345 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; 5346 5347 rc = bnx2x_iov_eq_sp_event(bp, elem); 5348 if (!rc) { 5349 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", 5350 rc); 5351 goto next_spqe; 5352 } 5353 5354 /* elem CID originates from FW; actually LE */ 5355 cid = SW_CID((__force __le32) 5356 elem->message.data.cfc_del_event.cid); 5357 opcode = elem->message.opcode; 5358 5359 /* handle eq element */ 5360 switch (opcode) { 5361 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 5362 bnx2x_vf_mbx_schedule(bp, 5363 &elem->message.data.vf_pf_event); 5364 continue; 5365 5366 case EVENT_RING_OPCODE_STAT_QUERY: 5367 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), 5368 "got statistics comp event %d\n", 5369 bp->stats_comp++); 5370 /* nothing to do with stats comp */ 5371 goto next_spqe; 5372 5373 case EVENT_RING_OPCODE_CFC_DEL: 5374 /* handle according to cid range */ 5375 /* 5376 * we may want to verify here that the bp state is 5377 * HALTING 5378 */ 5379 DP(BNX2X_MSG_SP, 5380 "got delete ramrod for MULTI[%d]\n", cid); 5381 5382 if (CNIC_LOADED(bp) && 5383 !bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 5384 goto next_spqe; 5385 5386 q_obj = bnx2x_cid_to_q_obj(bp, cid); 5387 5388 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 5389 break; 5390 5391 goto next_spqe; 5392 5393 case EVENT_RING_OPCODE_STOP_TRAFFIC: 5394 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 5395 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); 5396 if (f_obj->complete_cmd(bp, f_obj, 5397 BNX2X_F_CMD_TX_STOP)) 5398 break; 5399 goto next_spqe; 5400 5401 case EVENT_RING_OPCODE_START_TRAFFIC: 5402 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 5403 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 5404 if (f_obj->complete_cmd(bp, f_obj, 5405 BNX2X_F_CMD_TX_START)) 5406 break; 5407 goto next_spqe; 5408 5409 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 5410 echo = elem->message.data.function_update_event.echo; 5411 if (echo == SWITCH_UPDATE) { 5412 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5413 "got FUNC_SWITCH_UPDATE ramrod\n"); 5414 if (f_obj->complete_cmd( 5415 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) 5416 break; 5417 5418 } else { 5419 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; 5420 5421 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 5422 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 5423 f_obj->complete_cmd(bp, f_obj, 5424 BNX2X_F_CMD_AFEX_UPDATE); 5425 5426 /* We will perform the Queues update from 5427 * sp_rtnl task as all Queue SP operations 5428 * should run under rtnl_lock. 
5429 */ 5430 bnx2x_schedule_sp_rtnl(bp, cmd, 0); 5431 } 5432 5433 goto next_spqe; 5434 5435 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 5436 f_obj->complete_cmd(bp, f_obj, 5437 BNX2X_F_CMD_AFEX_VIFLISTS); 5438 bnx2x_after_afex_vif_lists(bp, elem); 5439 goto next_spqe; 5440 case EVENT_RING_OPCODE_FUNCTION_START: 5441 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5442 "got FUNC_START ramrod\n"); 5443 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 5444 break; 5445 5446 goto next_spqe; 5447 5448 case EVENT_RING_OPCODE_FUNCTION_STOP: 5449 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5450 "got FUNC_STOP ramrod\n"); 5451 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 5452 break; 5453 5454 goto next_spqe; 5455 } 5456 5457 switch (opcode | bp->state) { 5458 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5459 BNX2X_STATE_OPEN): 5460 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5461 BNX2X_STATE_OPENING_WAIT4_PORT): 5462 cid = elem->message.data.eth_event.echo & 5463 BNX2X_SWCID_MASK; 5464 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", 5465 cid); 5466 rss_raw->clear_pending(rss_raw); 5467 break; 5468 5469 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 5470 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 5471 case (EVENT_RING_OPCODE_SET_MAC | 5472 BNX2X_STATE_CLOSING_WAIT4_HALT): 5473 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5474 BNX2X_STATE_OPEN): 5475 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5476 BNX2X_STATE_DIAG): 5477 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5478 BNX2X_STATE_CLOSING_WAIT4_HALT): 5479 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); 5480 bnx2x_handle_classification_eqe(bp, elem); 5481 break; 5482 5483 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5484 BNX2X_STATE_OPEN): 5485 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5486 BNX2X_STATE_DIAG): 5487 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5488 BNX2X_STATE_CLOSING_WAIT4_HALT): 5489 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); 5490 bnx2x_handle_mcast_eqe(bp); 5491 break; 5492 5493 case (EVENT_RING_OPCODE_FILTERS_RULES | 5494 BNX2X_STATE_OPEN): 5495 case (EVENT_RING_OPCODE_FILTERS_RULES | 5496 BNX2X_STATE_DIAG): 5497 case (EVENT_RING_OPCODE_FILTERS_RULES | 5498 BNX2X_STATE_CLOSING_WAIT4_HALT): 5499 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); 5500 bnx2x_handle_rx_mode_eqe(bp); 5501 break; 5502 default: 5503 /* unknown event log error and continue */ 5504 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", 5505 elem->message.opcode, bp->state); 5506 } 5507 next_spqe: 5508 spqe_cnt++; 5509 } /* for */ 5510 5511 smp_mb__before_atomic(); 5512 atomic_add(spqe_cnt, &bp->eq_spq_left); 5513 5514 bp->eq_cons = sw_cons; 5515 bp->eq_prod = sw_prod; 5516 /* Make sure that above mem writes were issued towards the memory */ 5517 smp_wmb(); 5518 5519 /* update producer */ 5520 bnx2x_update_eq_prod(bp, bp->eq_prod); 5521 } 5522 5523 static void bnx2x_sp_task(struct work_struct *work) 5524 { 5525 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 5526 5527 DP(BNX2X_MSG_SP, "sp task invoked\n"); 5528 5529 /* make sure the atomic interrupt_occurred has been written */ 5530 smp_rmb(); 5531 if (atomic_read(&bp->interrupt_occurred)) { 5532 5533 /* what work needs to be performed? 
*/ 5534 u16 status = bnx2x_update_dsb_idx(bp); 5535 5536 DP(BNX2X_MSG_SP, "status %x\n", status); 5537 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); 5538 atomic_set(&bp->interrupt_occurred, 0); 5539 5540 /* HW attentions */ 5541 if (status & BNX2X_DEF_SB_ATT_IDX) { 5542 bnx2x_attn_int(bp); 5543 status &= ~BNX2X_DEF_SB_ATT_IDX; 5544 } 5545 5546 /* SP events: STAT_QUERY and others */ 5547 if (status & BNX2X_DEF_SB_IDX) { 5548 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5549 5550 if (FCOE_INIT(bp) && 5551 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5552 /* Prevent local bottom-halves from running as 5553 * we are going to change the local NAPI list. 5554 */ 5555 local_bh_disable(); 5556 napi_schedule(&bnx2x_fcoe(bp, napi)); 5557 local_bh_enable(); 5558 } 5559 5560 /* Handle EQ completions */ 5561 bnx2x_eq_int(bp); 5562 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 5563 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); 5564 5565 status &= ~BNX2X_DEF_SB_IDX; 5566 } 5567 5568 /* if status is non zero then perhaps something went wrong */ 5569 if (unlikely(status)) 5570 DP(BNX2X_MSG_SP, 5571 "got an unknown interrupt! (status 0x%x)\n", status); 5572 5573 /* ack status block only if something was actually handled */ 5574 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5575 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5576 } 5577 5578 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5579 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5580 &bp->sp_state)) { 5581 bnx2x_link_report(bp); 5582 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5583 } 5584 } 5585 5586 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5587 { 5588 struct net_device *dev = dev_instance; 5589 struct bnx2x *bp = netdev_priv(dev); 5590 5591 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, 5592 IGU_INT_DISABLE, 0); 5593 5594 #ifdef BNX2X_STOP_ON_ERROR 5595 if (unlikely(bp->panic)) 5596 return IRQ_HANDLED; 5597 #endif 5598 5599 if (CNIC_LOADED(bp)) { 5600 struct cnic_ops *c_ops; 5601 5602 rcu_read_lock(); 5603 c_ops = rcu_dereference(bp->cnic_ops); 5604 if (c_ops) 5605 c_ops->cnic_handler(bp->cnic_data, NULL); 5606 rcu_read_unlock(); 5607 } 5608 5609 /* schedule sp task to perform default status block work, ack 5610 * attentions and enable interrupts. 5611 */ 5612 bnx2x_schedule_sp_task(bp); 5613 5614 return IRQ_HANDLED; 5615 } 5616 5617 /* end of slow path */ 5618 5619 void bnx2x_drv_pulse(struct bnx2x *bp) 5620 { 5621 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5622 bp->fw_drv_pulse_wr_seq); 5623 } 5624 5625 static void bnx2x_timer(unsigned long data) 5626 { 5627 struct bnx2x *bp = (struct bnx2x *) data; 5628 5629 if (!netif_running(bp->dev)) 5630 return; 5631 5632 if (IS_PF(bp) && 5633 !BP_NOMCP(bp)) { 5634 int mb_idx = BP_FW_MB_IDX(bp); 5635 u16 drv_pulse; 5636 u16 mcp_pulse; 5637 5638 ++bp->fw_drv_pulse_wr_seq; 5639 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5640 drv_pulse = bp->fw_drv_pulse_wr_seq; 5641 bnx2x_drv_pulse(bp); 5642 5643 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5644 MCP_PULSE_SEQ_MASK); 5645 /* The delta between driver pulse and mcp response 5646 * should not get too big. If the MFW is more than 5 pulses 5647 * behind, we should worry about it enough to generate an error 5648 * log. 
5649 */ 5650 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) 5651 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 5652 drv_pulse, mcp_pulse); 5653 } 5654 5655 if (bp->state == BNX2X_STATE_OPEN) 5656 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 5657 5658 /* sample pf vf bulletin board for new posts from pf */ 5659 if (IS_VF(bp)) 5660 bnx2x_timer_sriov(bp); 5661 5662 mod_timer(&bp->timer, jiffies + bp->current_interval); 5663 } 5664 5665 /* end of Statistics */ 5666 5667 /* nic init */ 5668 5669 /* 5670 * nic init service functions 5671 */ 5672 5673 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5674 { 5675 u32 i; 5676 if (!(len%4) && !(addr%4)) 5677 for (i = 0; i < len; i += 4) 5678 REG_WR(bp, addr + i, fill); 5679 else 5680 for (i = 0; i < len; i++) 5681 REG_WR8(bp, addr + i, fill); 5682 } 5683 5684 /* helper: writes FP SP data to FW - data_size in dwords */ 5685 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5686 int fw_sb_id, 5687 u32 *sb_data_p, 5688 u32 data_size) 5689 { 5690 int index; 5691 for (index = 0; index < data_size; index++) 5692 REG_WR(bp, BAR_CSTRORM_INTMEM + 5693 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 5694 sizeof(u32)*index, 5695 *(sb_data_p + index)); 5696 } 5697 5698 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5699 { 5700 u32 *sb_data_p; 5701 u32 data_size = 0; 5702 struct hc_status_block_data_e2 sb_data_e2; 5703 struct hc_status_block_data_e1x sb_data_e1x; 5704 5705 /* disable the function first */ 5706 if (!CHIP_IS_E1x(bp)) { 5707 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5708 sb_data_e2.common.state = SB_DISABLED; 5709 sb_data_e2.common.p_func.vf_valid = false; 5710 sb_data_p = (u32 *)&sb_data_e2; 5711 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5712 } else { 5713 memset(&sb_data_e1x, 0, 5714 sizeof(struct hc_status_block_data_e1x)); 5715 sb_data_e1x.common.state = SB_DISABLED; 5716 sb_data_e1x.common.p_func.vf_valid = false; 5717 sb_data_p = (u32 *)&sb_data_e1x; 5718 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5719 } 5720 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 5721 5722 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5723 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, 5724 CSTORM_STATUS_BLOCK_SIZE); 5725 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5726 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, 5727 CSTORM_SYNC_BLOCK_SIZE); 5728 } 5729 5730 /* helper: writes SP SB data to FW */ 5731 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5732 struct hc_sp_status_block_data *sp_sb_data) 5733 { 5734 int func = BP_FUNC(bp); 5735 int i; 5736 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 5737 REG_WR(bp, BAR_CSTRORM_INTMEM + 5738 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 5739 i*sizeof(u32), 5740 *((u32 *)sp_sb_data + i)); 5741 } 5742 5743 static void bnx2x_zero_sp_sb(struct bnx2x *bp) 5744 { 5745 int func = BP_FUNC(bp); 5746 struct hc_sp_status_block_data sp_sb_data; 5747 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5748 5749 sp_sb_data.state = SB_DISABLED; 5750 sp_sb_data.p_func.vf_valid = false; 5751 5752 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 5753 5754 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5755 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, 5756 CSTORM_SP_STATUS_BLOCK_SIZE); 5757 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5758 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, 5759 CSTORM_SP_SYNC_BLOCK_SIZE); 5760 } 5761 5762 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 5763 int igu_sb_id, int 
igu_seg_id) 5764 { 5765 hc_sm->igu_sb_id = igu_sb_id; 5766 hc_sm->igu_seg_id = igu_seg_id; 5767 hc_sm->timer_value = 0xFF; 5768 hc_sm->time_to_expire = 0xFFFFFFFF; 5769 } 5770 5771 /* allocates state machine ids. */ 5772 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 5773 { 5774 /* zero out state machine indices */ 5775 /* rx indices */ 5776 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5777 5778 /* tx indices */ 5779 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5780 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 5781 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 5782 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 5783 5784 /* map indices */ 5785 /* rx indices */ 5786 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 5787 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5788 5789 /* tx indices */ 5790 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 5791 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5792 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 5793 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5794 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 5795 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5796 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 5797 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5798 } 5799 5800 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 5801 u8 vf_valid, int fw_sb_id, int igu_sb_id) 5802 { 5803 int igu_seg_id; 5804 5805 struct hc_status_block_data_e2 sb_data_e2; 5806 struct hc_status_block_data_e1x sb_data_e1x; 5807 struct hc_status_block_sm *hc_sm_p; 5808 int data_size; 5809 u32 *sb_data_p; 5810 5811 if (CHIP_INT_MODE_IS_BC(bp)) 5812 igu_seg_id = HC_SEG_ACCESS_NORM; 5813 else 5814 igu_seg_id = IGU_SEG_ACCESS_NORM; 5815 5816 bnx2x_zero_fp_sb(bp, fw_sb_id); 5817 5818 if (!CHIP_IS_E1x(bp)) { 5819 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5820 sb_data_e2.common.state = SB_ENABLED; 5821 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); 5822 sb_data_e2.common.p_func.vf_id = vfid; 5823 sb_data_e2.common.p_func.vf_valid = vf_valid; 5824 sb_data_e2.common.p_func.vnic_id = BP_VN(bp); 5825 sb_data_e2.common.same_igu_sb_1b = true; 5826 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); 5827 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); 5828 hc_sm_p = sb_data_e2.common.state_machine; 5829 sb_data_p = (u32 *)&sb_data_e2; 5830 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5831 bnx2x_map_sb_state_machines(sb_data_e2.index_data); 5832 } else { 5833 memset(&sb_data_e1x, 0, 5834 sizeof(struct hc_status_block_data_e1x)); 5835 sb_data_e1x.common.state = SB_ENABLED; 5836 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); 5837 sb_data_e1x.common.p_func.vf_id = 0xff; 5838 sb_data_e1x.common.p_func.vf_valid = false; 5839 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp); 5840 sb_data_e1x.common.same_igu_sb_1b = true; 5841 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); 5842 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); 5843 hc_sm_p = sb_data_e1x.common.state_machine; 5844 sb_data_p = (u32 *)&sb_data_e1x; 5845 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5846 bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 5847 } 5848 5849 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 5850 igu_sb_id, igu_seg_id); 5851 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], 5852 igu_sb_id, igu_seg_id); 5853 5854 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); 5855 5856 /* write indices to HW - PCI 
guarantees endianity of regpairs */ 5857 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 5858 } 5859 5860 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, 5861 u16 tx_usec, u16 rx_usec) 5862 { 5863 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, 5864 false, rx_usec); 5865 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5866 HC_INDEX_ETH_TX_CQ_CONS_COS0, false, 5867 tx_usec); 5868 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5869 HC_INDEX_ETH_TX_CQ_CONS_COS1, false, 5870 tx_usec); 5871 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5872 HC_INDEX_ETH_TX_CQ_CONS_COS2, false, 5873 tx_usec); 5874 } 5875 5876 static void bnx2x_init_def_sb(struct bnx2x *bp) 5877 { 5878 struct host_sp_status_block *def_sb = bp->def_status_blk; 5879 dma_addr_t mapping = bp->def_status_blk_mapping; 5880 int igu_sp_sb_index; 5881 int igu_seg_id; 5882 int port = BP_PORT(bp); 5883 int func = BP_FUNC(bp); 5884 int reg_offset, reg_offset_en5; 5885 u64 section; 5886 int index; 5887 struct hc_sp_status_block_data sp_sb_data; 5888 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5889 5890 if (CHIP_INT_MODE_IS_BC(bp)) { 5891 igu_sp_sb_index = DEF_SB_IGU_ID; 5892 igu_seg_id = HC_SEG_ACCESS_DEF; 5893 } else { 5894 igu_sp_sb_index = bp->igu_dsb_id; 5895 igu_seg_id = IGU_SEG_ACCESS_DEF; 5896 } 5897 5898 /* ATTN */ 5899 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 5900 atten_status_block); 5901 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 5902 5903 bp->attn_state = 0; 5904 5905 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5906 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5907 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 5908 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); 5909 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5910 int sindex; 5911 /* take care of sig[0]..sig[4] */ 5912 for (sindex = 0; sindex < 4; sindex++) 5913 bp->attn_group[index].sig[sindex] = 5914 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); 5915 5916 if (!CHIP_IS_E1x(bp)) 5917 /* 5918 * enable5 is separate from the rest of the registers, 5919 * and therefore the address skip is 4 5920 * and not 16 between the different groups 5921 */ 5922 bp->attn_group[index].sig[4] = REG_RD(bp, 5923 reg_offset_en5 + 0x4*index); 5924 else 5925 bp->attn_group[index].sig[4] = 0; 5926 } 5927 5928 if (bp->common.int_block == INT_BLOCK_HC) { 5929 reg_offset = (port ? 
HC_REG_ATTN_MSG1_ADDR_L : 5930 HC_REG_ATTN_MSG0_ADDR_L); 5931 5932 REG_WR(bp, reg_offset, U64_LO(section)); 5933 REG_WR(bp, reg_offset + 4, U64_HI(section)); 5934 } else if (!CHIP_IS_E1x(bp)) { 5935 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 5936 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 5937 } 5938 5939 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 5940 sp_sb); 5941 5942 bnx2x_zero_sp_sb(bp); 5943 5944 /* PCI guarantees endianity of regpairs */ 5945 sp_sb_data.state = SB_ENABLED; 5946 sp_sb_data.host_sb_addr.lo = U64_LO(section); 5947 sp_sb_data.host_sb_addr.hi = U64_HI(section); 5948 sp_sb_data.igu_sb_id = igu_sp_sb_index; 5949 sp_sb_data.igu_seg_id = igu_seg_id; 5950 sp_sb_data.p_func.pf_id = func; 5951 sp_sb_data.p_func.vnic_id = BP_VN(bp); 5952 sp_sb_data.p_func.vf_id = 0xff; 5953 5954 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 5955 5956 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 5957 } 5958 5959 void bnx2x_update_coalesce(struct bnx2x *bp) 5960 { 5961 int i; 5962 5963 for_each_eth_queue(bp, i) 5964 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, 5965 bp->tx_ticks, bp->rx_ticks); 5966 } 5967 5968 static void bnx2x_init_sp_ring(struct bnx2x *bp) 5969 { 5970 spin_lock_init(&bp->spq_lock); 5971 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); 5972 5973 bp->spq_prod_idx = 0; 5974 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 5975 bp->spq_prod_bd = bp->spq; 5976 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 5977 } 5978 5979 static void bnx2x_init_eq_ring(struct bnx2x *bp) 5980 { 5981 int i; 5982 for (i = 1; i <= NUM_EQ_PAGES; i++) { 5983 union event_ring_elem *elem = 5984 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; 5985 5986 elem->next_page.addr.hi = 5987 cpu_to_le32(U64_HI(bp->eq_mapping + 5988 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); 5989 elem->next_page.addr.lo = 5990 cpu_to_le32(U64_LO(bp->eq_mapping + 5991 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES))); 5992 } 5993 bp->eq_cons = 0; 5994 bp->eq_prod = NUM_EQ_DESC; 5995 bp->eq_cons_sb = BNX2X_EQ_INDEX; 5996 /* we want a warning message before it gets wrought... 
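* eq_spq_left is therefore initialized one entry short of the real room (the smaller of the SPQ slack and the EQ size), the intent being that the out-of-credit error path trips while a physical slot still remains.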
*/ 5997 atomic_set(&bp->eq_spq_left, 5998 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); 5999 } 6000 6001 /* called with netif_addr_lock_bh() */ 6002 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, 6003 unsigned long rx_mode_flags, 6004 unsigned long rx_accept_flags, 6005 unsigned long tx_accept_flags, 6006 unsigned long ramrod_flags) 6007 { 6008 struct bnx2x_rx_mode_ramrod_params ramrod_param; 6009 int rc; 6010 6011 memset(&ramrod_param, 0, sizeof(ramrod_param)); 6012 6013 /* Prepare ramrod parameters */ 6014 ramrod_param.cid = 0; 6015 ramrod_param.cl_id = cl_id; 6016 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; 6017 ramrod_param.func_id = BP_FUNC(bp); 6018 6019 ramrod_param.pstate = &bp->sp_state; 6020 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; 6021 6022 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); 6023 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); 6024 6025 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 6026 6027 ramrod_param.ramrod_flags = ramrod_flags; 6028 ramrod_param.rx_mode_flags = rx_mode_flags; 6029 6030 ramrod_param.rx_accept_flags = rx_accept_flags; 6031 ramrod_param.tx_accept_flags = tx_accept_flags; 6032 6033 rc = bnx2x_config_rx_mode(bp, &ramrod_param); 6034 if (rc < 0) { 6035 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); 6036 return rc; 6037 } 6038 6039 return 0; 6040 } 6041 6042 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, 6043 unsigned long *rx_accept_flags, 6044 unsigned long *tx_accept_flags) 6045 { 6046 /* Clear the flags first */ 6047 *rx_accept_flags = 0; 6048 *tx_accept_flags = 0; 6049 6050 switch (rx_mode) { 6051 case BNX2X_RX_MODE_NONE: 6052 /* 6053 * 'drop all' supersedes any accept flags that may have been 6054 * passed to the function. 6055 */ 6056 break; 6057 case BNX2X_RX_MODE_NORMAL: 6058 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6059 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags); 6060 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6061 6062 /* internal switching mode */ 6063 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6064 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); 6065 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6066 6067 break; 6068 case BNX2X_RX_MODE_ALLMULTI: 6069 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6070 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 6071 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6072 6073 /* internal switching mode */ 6074 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6075 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 6076 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6077 6078 break; 6079 case BNX2X_RX_MODE_PROMISC: 6080 /* According to definition of SI mode, iface in promisc mode 6081 * should receive matched and unmatched (in resolution of port) 6082 * unicast packets. 
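* Hence BNX2X_ACCEPT_UNMATCHED is set below in addition to the regular unicast accept flag.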
6083 */ 6084 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags); 6085 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6086 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 6087 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6088 6089 /* internal switching mode */ 6090 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 6091 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6092 6093 if (IS_MF_SI(bp)) 6094 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags); 6095 else 6096 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6097 6098 break; 6099 default: 6100 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); 6101 return -EINVAL; 6102 } 6103 6104 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 6105 if (bp->rx_mode != BNX2X_RX_MODE_NONE) { 6106 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6107 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6108 } 6109 6110 return 0; 6111 } 6112 6113 /* called with netif_addr_lock_bh() */ 6114 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp) 6115 { 6116 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 6117 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 6118 int rc; 6119 6120 if (!NO_FCOE(bp)) 6121 /* Configure rx_mode of FCoE Queue */ 6122 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); 6123 6124 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, 6125 &tx_accept_flags); 6126 if (rc) 6127 return rc; 6128 6129 __set_bit(RAMROD_RX, &ramrod_flags); 6130 __set_bit(RAMROD_TX, &ramrod_flags); 6131 6132 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, 6133 rx_accept_flags, tx_accept_flags, 6134 ramrod_flags); 6135 } 6136 6137 static void bnx2x_init_internal_common(struct bnx2x *bp) 6138 { 6139 int i; 6140 6141 /* Zero this manually as its initialization is 6142 currently missing in the initTool */ 6143 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 6144 REG_WR(bp, BAR_USTRORM_INTMEM + 6145 USTORM_AGG_DATA_OFFSET + i * 4, 0); 6146 if (!CHIP_IS_E1x(bp)) { 6147 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, 6148 CHIP_INT_MODE_IS_BC(bp) ? 
6149 HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 6150 } 6151 } 6152 6153 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 6154 { 6155 switch (load_code) { 6156 case FW_MSG_CODE_DRV_LOAD_COMMON: 6157 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 6158 bnx2x_init_internal_common(bp); 6159 /* no break */ 6160 6161 case FW_MSG_CODE_DRV_LOAD_PORT: 6162 /* nothing to do */ 6163 /* no break */ 6164 6165 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 6166 /* internal memory per function is 6167 initialized inside bnx2x_pf_init */ 6168 break; 6169 6170 default: 6171 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); 6172 break; 6173 } 6174 } 6175 6176 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 6177 { 6178 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); 6179 } 6180 6181 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 6182 { 6183 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); 6184 } 6185 6186 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 6187 { 6188 if (CHIP_IS_E1x(fp->bp)) 6189 return BP_L_ID(fp->bp) + fp->index; 6190 else /* We want Client ID to be the same as IGU SB ID for 57712 */ 6191 return bnx2x_fp_igu_sb_id(fp); 6192 } 6193 6194 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) 6195 { 6196 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; 6197 u8 cos; 6198 unsigned long q_type = 0; 6199 u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; 6200 fp->rx_queue = fp_idx; 6201 fp->cid = fp_idx; 6202 fp->cl_id = bnx2x_fp_cl_id(fp); 6203 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); 6204 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); 6205 /* qZone id equals to FW (per path) client id */ 6206 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); 6207 6208 /* init shortcut */ 6209 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); 6210 6211 /* Setup SB indices */ 6212 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 6213 6214 /* Configure Queue State object */ 6215 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 6216 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 6217 6218 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); 6219 6220 /* init tx data */ 6221 for_each_cos_in_tx_queue(fp, cos) { 6222 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], 6223 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), 6224 FP_COS_TO_TXQ(fp, cos, bp), 6225 BNX2X_TX_SB_INDEX_BASE + cos, fp); 6226 cids[cos] = fp->txdata_ptr[cos]->cid; 6227 } 6228 6229 /* nothing more for vf to do here */ 6230 if (IS_VF(bp)) 6231 return; 6232 6233 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, 6234 fp->fw_sb_id, fp->igu_sb_id); 6235 bnx2x_update_fpsb_idx(fp); 6236 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, 6237 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 6238 bnx2x_sp_mapping(bp, q_rdata), q_type); 6239 6240 /** 6241 * Configure classification DBs: Always enable Tx switching 6242 */ 6243 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); 6244 6245 DP(NETIF_MSG_IFUP, 6246 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", 6247 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 6248 fp->igu_sb_id); 6249 } 6250 6251 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) 6252 { 6253 int i; 6254 6255 for (i = 1; i <= NUM_TX_RINGS; i++) { 6256 struct eth_tx_next_bd *tx_next_bd = 6257 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; 6258 6259 tx_next_bd->addr_hi = 6260 cpu_to_le32(U64_HI(txdata->tx_desc_mapping + 6261 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 6262 tx_next_bd->addr_lo = 6263 cpu_to_le32(U64_LO(txdata->tx_desc_mapping + 6264 BCM_PAGE_SIZE*(i % 
NUM_TX_RINGS))); 6265 } 6266 6267 *txdata->tx_cons_sb = cpu_to_le16(0); 6268 6269 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 6270 txdata->tx_db.data.zero_fill1 = 0; 6271 txdata->tx_db.data.prod = 0; 6272 6273 txdata->tx_pkt_prod = 0; 6274 txdata->tx_pkt_cons = 0; 6275 txdata->tx_bd_prod = 0; 6276 txdata->tx_bd_cons = 0; 6277 txdata->tx_pkt = 0; 6278 } 6279 6280 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) 6281 { 6282 int i; 6283 6284 for_each_tx_queue_cnic(bp, i) 6285 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); 6286 } 6287 6288 static void bnx2x_init_tx_rings(struct bnx2x *bp) 6289 { 6290 int i; 6291 u8 cos; 6292 6293 for_each_eth_queue(bp, i) 6294 for_each_cos_in_tx_queue(&bp->fp[i], cos) 6295 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 6296 } 6297 6298 static void bnx2x_init_fcoe_fp(struct bnx2x *bp) 6299 { 6300 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 6301 unsigned long q_type = 0; 6302 6303 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); 6304 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, 6305 BNX2X_FCOE_ETH_CL_ID_IDX); 6306 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); 6307 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; 6308 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; 6309 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; 6310 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), 6311 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, 6312 fp); 6313 6314 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); 6315 6316 /* qZone id equals to FW (per path) client id */ 6317 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); 6318 /* init shortcut */ 6319 bnx2x_fcoe(bp, ustorm_rx_prods_offset) = 6320 bnx2x_rx_ustorm_prods_offset(fp); 6321 6322 /* Configure Queue State object */ 6323 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 6324 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 6325 6326 /* No multi-CoS for FCoE L2 client */ 6327 BUG_ON(fp->max_cos != 1); 6328 6329 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, 6330 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 6331 bnx2x_sp_mapping(bp, q_rdata), q_type); 6332 6333 DP(NETIF_MSG_IFUP, 6334 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", 6335 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 6336 fp->igu_sb_id); 6337 } 6338 6339 void bnx2x_nic_init_cnic(struct bnx2x *bp) 6340 { 6341 if (!NO_FCOE(bp)) 6342 bnx2x_init_fcoe_fp(bp); 6343 6344 bnx2x_init_sb(bp, bp->cnic_sb_mapping, 6345 BNX2X_VF_ID_INVALID, false, 6346 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 6347 6348 /* ensure status block indices were read */ 6349 rmb(); 6350 bnx2x_init_rx_rings_cnic(bp); 6351 bnx2x_init_tx_rings_cnic(bp); 6352 6353 /* flush all */ 6354 mb(); 6355 mmiowb(); 6356 } 6357 6358 void bnx2x_pre_irq_nic_init(struct bnx2x *bp) 6359 { 6360 int i; 6361 6362 /* Setup NIC internals and enable interrupts */ 6363 for_each_eth_queue(bp, i) 6364 bnx2x_init_eth_fp(bp, i); 6365 6366 /* ensure status block indices were read */ 6367 rmb(); 6368 bnx2x_init_rx_rings(bp); 6369 bnx2x_init_tx_rings(bp); 6370 6371 if (IS_PF(bp)) { 6372 /* Initialize MOD_ABS interrupts */ 6373 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 6374 bp->common.shmem_base, 6375 bp->common.shmem2_base, BP_PORT(bp)); 6376 6377 /* initialize the default status block and sp ring */ 6378 bnx2x_init_def_sb(bp); 6379 bnx2x_update_dsb_idx(bp); 6380 bnx2x_init_sp_ring(bp); 6381 } else { 6382 bnx2x_memset_stats(bp); 6383 } 6384 } 6385 6386 void bnx2x_post_irq_nic_init(struct bnx2x 
*bp, u32 load_code) 6387 { 6388 bnx2x_init_eq_ring(bp); 6389 bnx2x_init_internal(bp, load_code); 6390 bnx2x_pf_init(bp); 6391 bnx2x_stats_init(bp); 6392 6393 /* flush all before enabling interrupts */ 6394 mb(); 6395 mmiowb(); 6396 6397 bnx2x_int_enable(bp); 6398 6399 /* Check for SPIO5 */ 6400 bnx2x_attn_int_deasserted0(bp, 6401 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & 6402 AEU_INPUTS_ATTN_BITS_SPIO5); 6403 } 6404 6405 /* gzip service functions */ 6406 static int bnx2x_gunzip_init(struct bnx2x *bp) 6407 { 6408 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, 6409 &bp->gunzip_mapping, GFP_KERNEL); 6410 if (bp->gunzip_buf == NULL) 6411 goto gunzip_nomem1; 6412 6413 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); 6414 if (bp->strm == NULL) 6415 goto gunzip_nomem2; 6416 6417 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); 6418 if (bp->strm->workspace == NULL) 6419 goto gunzip_nomem3; 6420 6421 return 0; 6422 6423 gunzip_nomem3: 6424 kfree(bp->strm); 6425 bp->strm = NULL; 6426 6427 gunzip_nomem2: 6428 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6429 bp->gunzip_mapping); 6430 bp->gunzip_buf = NULL; 6431 6432 gunzip_nomem1: 6433 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); 6434 return -ENOMEM; 6435 } 6436 6437 static void bnx2x_gunzip_end(struct bnx2x *bp) 6438 { 6439 if (bp->strm) { 6440 vfree(bp->strm->workspace); 6441 kfree(bp->strm); 6442 bp->strm = NULL; 6443 } 6444 6445 if (bp->gunzip_buf) { 6446 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6447 bp->gunzip_mapping); 6448 bp->gunzip_buf = NULL; 6449 } 6450 } 6451 6452 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) 6453 { 6454 int n, rc; 6455 6456 /* check gzip header */ 6457 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { 6458 BNX2X_ERR("Bad gzip header\n"); 6459 return -EINVAL; 6460 } 6461 6462 n = 10; 6463 6464 #define FNAME 0x8 6465 6466 if (zbuf[3] & FNAME) 6467 while ((zbuf[n++] != 0) && (n < len)); 6468 6469 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; 6470 bp->strm->avail_in = len - n; 6471 bp->strm->next_out = bp->gunzip_buf; 6472 bp->strm->avail_out = FW_BUF_SIZE; 6473 6474 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); 6475 if (rc != Z_OK) 6476 return rc; 6477 6478 rc = zlib_inflate(bp->strm, Z_FINISH); 6479 if ((rc != Z_OK) && (rc != Z_STREAM_END)) 6480 netdev_err(bp->dev, "Firmware decompression error: %s\n", 6481 bp->strm->msg); 6482 6483 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6484 if (bp->gunzip_outlen & 0x3) 6485 netdev_err(bp->dev, 6486 "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6487 bp->gunzip_outlen); 6488 bp->gunzip_outlen >>= 2; 6489 6490 zlib_inflateEnd(bp->strm); 6491 6492 if (rc == Z_STREAM_END) 6493 return 0; 6494 6495 return rc; 6496 } 6497 6498 /* nic load/unload */ 6499 6500 /* 6501 * General service functions 6502 */ 6503 6504 /* send a NIG loopback debug packet */ 6505 static void bnx2x_lb_pckt(struct bnx2x *bp) 6506 { 6507 u32 wb_write[3]; 6508 6509 /* Ethernet source and destination addresses */ 6510 wb_write[0] = 0x55555555; 6511 wb_write[1] = 0x55555555; 6512 wb_write[2] = 0x20; /* SOP */ 6513 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6514 6515 /* NON-IP protocol */ 6516 wb_write[0] = 0x09000000; 6517 wb_write[1] = 0x55555555; 6518 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 6519 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6520 } 6521 6522 /* some of the internal memories 
6523 * are not directly readable from the driver 6524 * to test them we send debug packets 6525 */ 6526 static int bnx2x_int_mem_test(struct bnx2x *bp) 6527 { 6528 int factor; 6529 int count, i; 6530 u32 val = 0; 6531 6532 if (CHIP_REV_IS_FPGA(bp)) 6533 factor = 120; 6534 else if (CHIP_REV_IS_EMUL(bp)) 6535 factor = 200; 6536 else 6537 factor = 1; 6538 6539 /* Disable inputs of parser neighbor blocks */ 6540 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6541 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6542 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6543 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6544 6545 /* Write 0 to parser credits for CFC search request */ 6546 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6547 6548 /* send Ethernet packet */ 6549 bnx2x_lb_pckt(bp); 6550 6551 /* TODO do i reset NIG statistic? */ 6552 /* Wait until NIG register shows 1 packet of size 0x10 */ 6553 count = 1000 * factor; 6554 while (count) { 6555 6556 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6557 val = *bnx2x_sp(bp, wb_data[0]); 6558 if (val == 0x10) 6559 break; 6560 6561 usleep_range(10000, 20000); 6562 count--; 6563 } 6564 if (val != 0x10) { 6565 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6566 return -1; 6567 } 6568 6569 /* Wait until PRS register shows 1 packet */ 6570 count = 1000 * factor; 6571 while (count) { 6572 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6573 if (val == 1) 6574 break; 6575 6576 usleep_range(10000, 20000); 6577 count--; 6578 } 6579 if (val != 0x1) { 6580 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6581 return -2; 6582 } 6583 6584 /* Reset and init BRB, PRS */ 6585 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6586 msleep(50); 6587 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6588 msleep(50); 6589 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6590 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6591 6592 DP(NETIF_MSG_HW, "part2\n"); 6593 6594 /* Disable inputs of parser neighbor blocks */ 6595 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6596 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6597 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6598 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6599 6600 /* Write 0 to parser credits for CFC search request */ 6601 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6602 6603 /* send 10 Ethernet packets */ 6604 for (i = 0; i < 10; i++) 6605 bnx2x_lb_pckt(bp); 6606 6607 /* Wait until NIG register shows 10 + 1 6608 packets of size 11*0x10 = 0xb0 */ 6609 count = 1000 * factor; 6610 while (count) { 6611 6612 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6613 val = *bnx2x_sp(bp, wb_data[0]); 6614 if (val == 0xb0) 6615 break; 6616 6617 usleep_range(10000, 20000); 6618 count--; 6619 } 6620 if (val != 0xb0) { 6621 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6622 return -3; 6623 } 6624 6625 /* Wait until PRS register shows 2 packets */ 6626 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6627 if (val != 2) 6628 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6629 6630 /* Write 1 to parser credits for CFC search request */ 6631 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 6632 6633 /* Wait until PRS register shows 3 packets */ 6634 msleep(10 * factor); 6635 /* Wait until NIG register shows 1 packet of size 0x10 */ 6636 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6637 if (val != 3) 6638 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6639 6640 /* clear NIG EOP FIFO */ 6641 for (i = 0; i < 11; i++) 6642 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); 6643 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); 6644 if (val != 1) { 6645 BNX2X_ERR("clear of NIG failed\n"); 6646 return 
-4; 6647 } 6648 6649 /* Reset and init BRB, PRS, NIG */ 6650 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6651 msleep(50); 6652 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6653 msleep(50); 6654 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6655 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6656 if (!CNIC_SUPPORT(bp)) 6657 /* set NIC mode */ 6658 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6659 6660 /* Enable inputs of parser neighbor blocks */ 6661 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6662 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 6663 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 6664 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); 6665 6666 DP(NETIF_MSG_HW, "done\n"); 6667 6668 return 0; /* OK */ 6669 } 6670 6671 static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 6672 { 6673 u32 val; 6674 6675 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6676 if (!CHIP_IS_E1x(bp)) 6677 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6678 else 6679 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 6680 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6681 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6682 /* 6683 * mask read length error interrupts in brb for parser 6684 * (parsing unit and 'checksum and crc' unit) 6685 * these errors are legal (PU reads fixed length and CAC can cause 6686 * read length error on truncated packets) 6687 */ 6688 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); 6689 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 6690 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 6691 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 6692 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); 6693 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); 6694 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ 6695 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ 6696 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); 6697 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); 6698 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); 6699 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ 6700 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ 6701 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 6702 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); 6703 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); 6704 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 6705 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6706 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6707 6708 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 6709 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 6710 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN; 6711 if (!CHIP_IS_E1x(bp)) 6712 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 6713 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED; 6714 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); 6715 6716 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6717 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6718 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6719 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ 6720 6721 if (!CHIP_IS_E1x(bp)) 6722 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 6723 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 6724 6725 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 6726 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 6727 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 6728 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 6729 } 6730 6731 static void bnx2x_reset_common(struct bnx2x *bp) 6732 { 6733 u32 val = 0x1400; 6734 6735 /* reset_common */ 6736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6737 0xd3ffff7f); 6738 6739 if (CHIP_IS_E3(bp)) { 6740 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6741 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6742 } 6743 6744 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); 6745 } 6746 6747 static void 
bnx2x_setup_dmae(struct bnx2x *bp) 6748 { 6749 bp->dmae_ready = 0; 6750 spin_lock_init(&bp->dmae_lock); 6751 } 6752 6753 static void bnx2x_init_pxp(struct bnx2x *bp) 6754 { 6755 u16 devctl; 6756 int r_order, w_order; 6757 6758 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); 6759 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6760 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6761 if (bp->mrrs == -1) 6762 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); 6763 else { 6764 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); 6765 r_order = bp->mrrs; 6766 } 6767 6768 bnx2x_init_pxp_arb(bp, r_order, w_order); 6769 } 6770 6771 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6772 { 6773 int is_required; 6774 u32 val; 6775 int port; 6776 6777 if (BP_NOMCP(bp)) 6778 return; 6779 6780 is_required = 0; 6781 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6782 SHARED_HW_CFG_FAN_FAILURE_MASK; 6783 6784 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) 6785 is_required = 1; 6786 6787 /* 6788 * The fan failure mechanism is usually related to the PHY type since 6789 * the power consumption of the board is affected by the PHY. Currently, 6790 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 6791 */ 6792 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 6793 for (port = PORT_0; port < PORT_MAX; port++) { 6794 is_required |= 6795 bnx2x_fan_failure_det_req( 6796 bp, 6797 bp->common.shmem_base, 6798 bp->common.shmem2_base, 6799 port); 6800 } 6801 6802 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 6803 6804 if (is_required == 0) 6805 return; 6806 6807 /* Fan failure is indicated by SPIO 5 */ 6808 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 6809 6810 /* set to active low mode */ 6811 val = REG_RD(bp, MISC_REG_SPIO_INT); 6812 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 6813 REG_WR(bp, MISC_REG_SPIO_INT, val); 6814 6815 /* enable interrupt to signal the IGU */ 6816 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6817 val |= MISC_SPIO_SPIO5; 6818 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 6819 } 6820 6821 void bnx2x_pf_disable(struct bnx2x *bp) 6822 { 6823 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 6824 val &= ~IGU_PF_CONF_FUNC_EN; 6825 6826 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 6827 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6828 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6829 } 6830 6831 static void bnx2x__common_init_phy(struct bnx2x *bp) 6832 { 6833 u32 shmem_base[2], shmem2_base[2]; 6834 /* Avoid common init in case MFW supports LFA */ 6835 if (SHMEM2_RD(bp, size) > 6836 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 6837 return; 6838 shmem_base[0] = bp->common.shmem_base; 6839 shmem2_base[0] = bp->common.shmem2_base; 6840 if (!CHIP_IS_E1x(bp)) { 6841 shmem_base[1] = 6842 SHMEM2_RD(bp, other_shmem_base_addr); 6843 shmem2_base[1] = 6844 SHMEM2_RD(bp, other_shmem2_base_addr); 6845 } 6846 bnx2x_acquire_phy_lock(bp); 6847 bnx2x_common_init_phy(bp, shmem_base, shmem2_base, 6848 bp->common.chip_id); 6849 bnx2x_release_phy_lock(bp); 6850 } 6851 6852 /** 6853 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 
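* Runs once per path (COMMON/COMMON_CHIP load code, i.e. the first function to load): chip reset, PXP/ILT and common block init, and enabling of block attention and parity reporting, before the per-port and per-function phases.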
6854 * 6855 * @bp: driver handle 6856 */ 6857 static int bnx2x_init_hw_common(struct bnx2x *bp) 6858 { 6859 u32 val; 6860 6861 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); 6862 6863 /* 6864 * take the RESET lock to protect undi_unload flow from accessing 6865 * registers while we're resetting the chip 6866 */ 6867 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 6868 6869 bnx2x_reset_common(bp); 6870 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 6871 6872 val = 0xfffc; 6873 if (CHIP_IS_E3(bp)) { 6874 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6875 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6876 } 6877 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 6878 6879 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 6880 6881 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 6882 6883 if (!CHIP_IS_E1x(bp)) { 6884 u8 abs_func_id; 6885 6886 /** 6887 * 4-port mode or 2-port mode we need to turn of master-enable 6888 * for everyone, after that, turn it back on for self. 6889 * so, we disregard multi-function or not, and always disable 6890 * for all functions on the given path, this means 0,2,4,6 for 6891 * path 0 and 1,3,5,7 for path 1 6892 */ 6893 for (abs_func_id = BP_PATH(bp); 6894 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { 6895 if (abs_func_id == BP_ABS_FUNC(bp)) { 6896 REG_WR(bp, 6897 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 6898 1); 6899 continue; 6900 } 6901 6902 bnx2x_pretend_func(bp, abs_func_id); 6903 /* clear pf enable */ 6904 bnx2x_pf_disable(bp); 6905 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 6906 } 6907 } 6908 6909 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); 6910 if (CHIP_IS_E1(bp)) { 6911 /* enable HW interrupt from PXP on USDM overflow 6912 bit 16 on INT_MASK_0 */ 6913 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6914 } 6915 6916 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); 6917 bnx2x_init_pxp(bp); 6918 6919 #ifdef __BIG_ENDIAN 6920 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); 6921 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); 6922 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 6923 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 6924 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 6925 /* make sure this value is 0 */ 6926 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); 6927 6928 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ 6929 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); 6930 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); 6931 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); 6932 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 6933 #endif 6934 6935 bnx2x_ilt_init_page_size(bp, INITOP_SET); 6936 6937 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 6938 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 6939 6940 /* let the HW do it's magic ... */ 6941 msleep(100); 6942 /* finish PXP init */ 6943 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); 6944 if (val != 1) { 6945 BNX2X_ERR("PXP2 CFG failed\n"); 6946 return -EBUSY; 6947 } 6948 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); 6949 if (val != 1) { 6950 BNX2X_ERR("PXP2 RD_INIT failed\n"); 6951 return -EBUSY; 6952 } 6953 6954 /* Timers bug workaround E2 only. We need to set the entire ILT to 6955 * have entries with value "0" and valid bit on. 6956 * This needs to be done by the first PF that is loaded in a path 6957 * (i.e. common phase) 6958 */ 6959 if (!CHIP_IS_E1x(bp)) { 6960 /* In E2 there is a bug in the timers block that can cause function 6 / 7 6961 * (i.e. vnic3) to start even if it is marked as "scan-off". 6962 * This occurs when a different function (func2,3) is being marked 6963 * as "scan-off". 
Real-life scenario for example: if a driver is being 6964 * load-unloaded while func6,7 are down. This will cause the timer to access 6965 * the ilt, translate to a logical address and send a request to read/write. 6966 * Since the ilt for the function that is down is not valid, this will cause 6967 * a translation error which is unrecoverable. 6968 * The Workaround is intended to make sure that when this happens nothing fatal 6969 * will occur. The workaround: 6970 * 1. First PF driver which loads on a path will: 6971 * a. After taking the chip out of reset, by using pretend, 6972 * it will write "0" to the following registers of 6973 * the other vnics. 6974 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6975 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 6976 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 6977 * And for itself it will write '1' to 6978 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 6979 * dmae-operations (writing to pram for example.) 6980 * note: can be done for only function 6,7 but cleaner this 6981 * way. 6982 * b. Write zero+valid to the entire ILT. 6983 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 6984 * VNIC3 (of that port). The range allocated will be the 6985 * entire ILT. This is needed to prevent ILT range error. 6986 * 2. Any PF driver load flow: 6987 * a. ILT update with the physical addresses of the allocated 6988 * logical pages. 6989 * b. Wait 20msec. - note that this timeout is needed to make 6990 * sure there are no requests in one of the PXP internal 6991 * queues with "old" ILT addresses. 6992 * c. PF enable in the PGLC. 6993 * d. Clear the was_error of the PF in the PGLC. (could have 6994 * occurred while driver was down) 6995 * e. PF enable in the CFC (WEAK + STRONG) 6996 * f. Timers scan enable 6997 * 3. PF driver unload flow: 6998 * a. Clear the Timers scan_en. 6999 * b. Polling for scan_on=0 for that PF. 7000 * c. Clear the PF enable bit in the PXP. 7001 * d. Clear the PF enable in the CFC (WEAK + STRONG) 7002 * e. Write zero+valid to all ILT entries (The valid bit must 7003 * stay set) 7004 * f. If this is VNIC 3 of a port then also init 7005 * first_timers_ilt_entry to zero and last_timers_ilt_entry 7006 * to the last entry in the ILT. 7007 * 7008 * Notes: 7009 * Currently the PF error in the PGLC is non recoverable. 7010 * In the future the there will be a recovery routine for this error. 7011 * Currently attention is masked. 7012 * Having an MCP lock on the load/unload process does not guarantee that 7013 * there is no Timer disable during Func6/7 enable. This is because the 7014 * Timers scan is currently being cleared by the MCP on FLR. 7015 * Step 2.d can be done only for PF6/7 and the driver can also check if 7016 * there is error before clearing it. But the flow above is simpler and 7017 * more general. 7018 * All ILT entries are written by zero+valid and not just PF6/7 7019 * ILT entries since in the future the ILT entries allocation for 7020 * PF-s might be dynamic. 
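* Steps 1.b and 1.c above are carried out right below by pretending to be function 6/7 of this path and initializing a dummy TM ILT client that spans the entire ILT.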
7021 */ 7022 struct ilt_client_info ilt_cli; 7023 struct bnx2x_ilt ilt; 7024 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 7025 memset(&ilt, 0, sizeof(struct bnx2x_ilt)); 7026 7027 /* initialize dummy TM client */ 7028 ilt_cli.start = 0; 7029 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 7030 ilt_cli.client_num = ILT_CLIENT_TM; 7031 7032 /* Step 1: set zeroes to all ilt page entries with valid bit on 7033 * Step 2: set the timers first/last ilt entry to point 7034 * to the entire range to prevent ILT range error for 3rd/4th 7035 * vnic (this code assumes existence of the vnic) 7036 * 7037 * both steps performed by call to bnx2x_ilt_client_init_op() 7038 * with dummy TM client 7039 * 7040 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 7041 * and his brother are split registers 7042 */ 7043 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); 7044 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); 7045 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 7046 7047 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); 7048 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); 7049 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 7050 } 7051 7052 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 7053 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 7054 7055 if (!CHIP_IS_E1x(bp)) { 7056 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : 7057 (CHIP_REV_IS_FPGA(bp) ? 400 : 0); 7058 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); 7059 7060 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); 7061 7062 /* let the HW do it's magic ... */ 7063 do { 7064 msleep(200); 7065 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); 7066 } while (factor-- && (val != 1)); 7067 7068 if (val != 1) { 7069 BNX2X_ERR("ATC_INIT failed\n"); 7070 return -EBUSY; 7071 } 7072 } 7073 7074 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); 7075 7076 bnx2x_iov_init_dmae(bp); 7077 7078 /* clean the DMAE memory */ 7079 bp->dmae_ready = 1; 7080 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); 7081 7082 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); 7083 7084 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); 7085 7086 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); 7087 7088 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); 7089 7090 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); 7091 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); 7092 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); 7093 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 7094 7095 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); 7096 7097 /* QM queues pointers table */ 7098 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 7099 7100 /* soft reset pulse */ 7101 REG_WR(bp, QM_REG_SOFT_RESET, 1); 7102 REG_WR(bp, QM_REG_SOFT_RESET, 0); 7103 7104 if (CNIC_SUPPORT(bp)) 7105 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 7106 7107 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 7108 7109 if (!CHIP_REV_IS_SLOW(bp)) 7110 /* enable hw interrupt from doorbell Q */ 7111 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 7112 7113 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 7114 7115 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 7116 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 7117 7118 if (!CHIP_IS_E1(bp)) 7119 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 7120 7121 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { 7122 if (IS_MF_AFEX(bp)) { 7123 /* configure that VNTag and VLAN headers must be 7124 * received in afex mode 7125 */ 7126 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); 7127 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); 7128 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 7129 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 
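/* 0x8926 is the VN-Tag EtherType referred to above; PRS_REG_TAG_LEN_0 below supplies the matching tag length to the parser */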
7130 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); 7131 } else { 7132 /* Bit-map indicating which L2 hdrs may appear 7133 * after the basic Ethernet header 7134 */ 7135 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 7136 bp->path_has_ovlan ? 7 : 6); 7137 } 7138 } 7139 7140 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 7141 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 7142 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); 7143 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); 7144 7145 if (!CHIP_IS_E1x(bp)) { 7146 /* reset VFC memories */ 7147 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7148 VFC_MEMORIES_RST_REG_CAM_RST | 7149 VFC_MEMORIES_RST_REG_RAM_RST); 7150 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7151 VFC_MEMORIES_RST_REG_CAM_RST | 7152 VFC_MEMORIES_RST_REG_RAM_RST); 7153 7154 msleep(20); 7155 } 7156 7157 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); 7158 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); 7159 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); 7160 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); 7161 7162 /* sync semi rtc */ 7163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 7164 0x80000000); 7165 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 7166 0x80000000); 7167 7168 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); 7169 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 7170 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 7171 7172 if (!CHIP_IS_E1x(bp)) { 7173 if (IS_MF_AFEX(bp)) { 7174 /* configure that VNTag and VLAN headers must be 7175 * sent in afex mode 7176 */ 7177 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); 7178 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); 7179 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 7180 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 7181 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); 7182 } else { 7183 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 7184 bp->path_has_ovlan ? 
7 : 6); 7185 } 7186 } 7187 7188 REG_WR(bp, SRC_REG_SOFT_RST, 1); 7189 7190 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 7191 7192 if (CNIC_SUPPORT(bp)) { 7193 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 7194 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 7195 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 7196 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 7197 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 7198 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 7199 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 7200 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 7201 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 7202 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 7203 } 7204 REG_WR(bp, SRC_REG_SOFT_RST, 0); 7205 7206 if (sizeof(union cdu_context) != 1024) 7207 /* we currently assume that a context is 1024 bytes */ 7208 dev_alert(&bp->pdev->dev, 7209 "please adjust the size of cdu_context(%ld)\n", 7210 (long)sizeof(union cdu_context)); 7211 7212 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); 7213 val = (4 << 24) + (0 << 12) + 1024; 7214 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 7215 7216 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); 7217 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 7218 /* enable context validation interrupt from CFC */ 7219 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 7220 7221 /* set the thresholds to prevent CFC/CDU race */ 7222 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 7223 7224 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); 7225 7226 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) 7227 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); 7228 7229 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); 7230 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); 7231 7232 /* Reset PCIE errors for debug */ 7233 REG_WR(bp, 0x2814, 0xffffffff); 7234 REG_WR(bp, 0x3820, 0xffffffff); 7235 7236 if (!CHIP_IS_E1x(bp)) { 7237 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 7238 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 7239 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 7240 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 7241 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 7242 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 7243 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 7244 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 7245 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 7246 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 7247 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 7248 } 7249 7250 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); 7251 if (!CHIP_IS_E1(bp)) { 7252 /* in E3 this done in per-port section */ 7253 if (!CHIP_IS_E3(bp)) 7254 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7255 } 7256 if (CHIP_IS_E1H(bp)) 7257 /* not applicable for E2 (and above ...) 
*/ 7258 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); 7259 7260 if (CHIP_REV_IS_SLOW(bp)) 7261 msleep(200); 7262 7263 /* finish CFC init */ 7264 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); 7265 if (val != 1) { 7266 BNX2X_ERR("CFC LL_INIT failed\n"); 7267 return -EBUSY; 7268 } 7269 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); 7270 if (val != 1) { 7271 BNX2X_ERR("CFC AC_INIT failed\n"); 7272 return -EBUSY; 7273 } 7274 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 7275 if (val != 1) { 7276 BNX2X_ERR("CFC CAM_INIT failed\n"); 7277 return -EBUSY; 7278 } 7279 REG_WR(bp, CFC_REG_DEBUG0, 0); 7280 7281 if (CHIP_IS_E1(bp)) { 7282 /* read NIG statistic 7283 to see if this is our first up since powerup */ 7284 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 7285 val = *bnx2x_sp(bp, wb_data[0]); 7286 7287 /* do internal memory self test */ 7288 if ((val == 0) && bnx2x_int_mem_test(bp)) { 7289 BNX2X_ERR("internal mem self test failed\n"); 7290 return -EBUSY; 7291 } 7292 } 7293 7294 bnx2x_setup_fan_failure_detection(bp); 7295 7296 /* clear PXP2 attentions */ 7297 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 7298 7299 bnx2x_enable_blocks_attention(bp); 7300 bnx2x_enable_blocks_parity(bp); 7301 7302 if (!BP_NOMCP(bp)) { 7303 if (CHIP_IS_E1x(bp)) 7304 bnx2x__common_init_phy(bp); 7305 } else 7306 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 7307 7308 return 0; 7309 } 7310 7311 /** 7312 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 7313 * 7314 * @bp: driver handle 7315 */ 7316 static int bnx2x_init_hw_common_chip(struct bnx2x *bp) 7317 { 7318 int rc = bnx2x_init_hw_common(bp); 7319 7320 if (rc) 7321 return rc; 7322 7323 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 7324 if (!BP_NOMCP(bp)) 7325 bnx2x__common_init_phy(bp); 7326 7327 return 0; 7328 } 7329 7330 static int bnx2x_init_hw_port(struct bnx2x *bp) 7331 { 7332 int port = BP_PORT(bp); 7333 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7334 u32 low, high; 7335 u32 val, reg; 7336 7337 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7338 7339 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 7340 7341 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7342 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7343 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7344 7345 /* Timers bug workaround: disables the pf_master bit in pglue at 7346 * common phase, we need to enable it here before any dmae access are 7347 * attempted. 
Therefore we manually added the enable-master to the 7348 * port phase (it also happens in the function phase) 7349 */ 7350 if (!CHIP_IS_E1x(bp)) 7351 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7352 7353 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7354 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7355 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7356 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7357 7358 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7359 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7360 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7361 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7362 7363 /* QM cid (connection) count */ 7364 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 7365 7366 if (CNIC_SUPPORT(bp)) { 7367 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7368 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 7369 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 7370 } 7371 7372 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7373 7374 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7375 7376 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 7377 7378 if (IS_MF(bp)) 7379 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 7380 else if (bp->dev->mtu > 4096) { 7381 if (bp->flags & ONE_PORT_FLAG) 7382 low = 160; 7383 else { 7384 val = bp->dev->mtu; 7385 /* (24*1024 + val*4)/256 */ 7386 low = 96 + (val/64) + 7387 ((val % 64) ? 1 : 0); 7388 } 7389 } else 7390 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 7391 high = low + 56; /* 14*1024/256 */ 7392 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 7393 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 7394 } 7395 7396 if (CHIP_MODE_IS_4_PORT(bp)) 7397 REG_WR(bp, (BP_PORT(bp) ? 7398 BRB1_REG_MAC_GUARANTIED_1 : 7399 BRB1_REG_MAC_GUARANTIED_0), 40); 7400 7401 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7402 if (CHIP_IS_E3B0(bp)) { 7403 if (IS_MF_AFEX(bp)) { 7404 /* configure headers for AFEX mode */ 7405 REG_WR(bp, BP_PORT(bp) ? 7406 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7407 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 7408 REG_WR(bp, BP_PORT(bp) ? 7409 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 7410 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 7411 REG_WR(bp, BP_PORT(bp) ? 7412 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 7413 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 7414 } else { 7415 /* Ovlan exists only if we are in multi-function + 7416 * switch-dependent mode, in switch-independent there 7417 * is no ovlan headers 7418 */ 7419 REG_WR(bp, BP_PORT(bp) ? 7420 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7421 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 7422 (bp->path_has_ovlan ? 
7 : 6)); 7423 } 7424 } 7425 7426 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7427 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7428 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7429 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7430 7431 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7432 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7433 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7434 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7435 7436 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7437 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7438 7439 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7440 7441 if (CHIP_IS_E1x(bp)) { 7442 /* configure PBF to work without PAUSE mtu 9000 */ 7443 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 7444 7445 /* update threshold */ 7446 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 7447 /* update init credit */ 7448 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 7449 7450 /* probe changes */ 7451 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 7452 udelay(50); 7453 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 7454 } 7455 7456 if (CNIC_SUPPORT(bp)) 7457 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7458 7459 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7460 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7461 7462 if (CHIP_IS_E1(bp)) { 7463 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7464 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7465 } 7466 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7467 7468 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7469 7470 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7471 /* init aeu_mask_attn_func_0/1: 7472 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use 7473 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF 7474 * bits 4-7 are used for "per vn group attention" */ 7475 val = IS_MF(bp) ? 0xF7 : 0x7; 7476 /* Enable DCBX attention for all but E1 */ 7477 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7478 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7479 7480 /* SCPAD_PARITY should NOT trigger close the gates */ 7481 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; 7482 REG_WR(bp, reg, 7483 REG_RD(bp, reg) & 7484 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7485 7486 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; 7487 REG_WR(bp, reg, 7488 REG_RD(bp, reg) & 7489 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7490 7491 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7492 7493 if (!CHIP_IS_E1x(bp)) { 7494 /* Bit-map indicating which L2 hdrs may appear after the 7495 * basic Ethernet header 7496 */ 7497 if (IS_MF_AFEX(bp)) 7498 REG_WR(bp, BP_PORT(bp) ? 7499 NIG_REG_P1_HDRS_AFTER_BASIC : 7500 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 7501 else 7502 REG_WR(bp, BP_PORT(bp) ? 7503 NIG_REG_P1_HDRS_AFTER_BASIC : 7504 NIG_REG_P0_HDRS_AFTER_BASIC, 7505 IS_MF_SD(bp) ? 7 : 6); 7506 7507 if (CHIP_IS_E3(bp)) 7508 REG_WR(bp, BP_PORT(bp) ? 7509 NIG_REG_LLH1_MF_MODE : 7510 NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7511 } 7512 if (!CHIP_IS_E3(bp)) 7513 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 7514 7515 if (!CHIP_IS_E1(bp)) { 7516 /* 0x2 disable mf_ov, 0x1 enable */ 7517 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 7518 (IS_MF_SD(bp) ? 0x1 : 0x2)); 7519 7520 if (!CHIP_IS_E1x(bp)) { 7521 val = 0; 7522 switch (bp->mf_mode) { 7523 case MULTI_FUNCTION_SD: 7524 val = 1; 7525 break; 7526 case MULTI_FUNCTION_SI: 7527 case MULTI_FUNCTION_AFEX: 7528 val = 2; 7529 break; 7530 } 7531 7532 REG_WR(bp, (BP_PORT(bp) ? 
NIG_REG_LLH1_CLS_TYPE : 7533 NIG_REG_LLH0_CLS_TYPE), val); 7534 } 7535 { 7536 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 7537 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 7538 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 7539 } 7540 } 7541 7542 /* If SPIO5 is set to generate interrupts, enable it for this port */ 7543 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 7544 if (val & MISC_SPIO_SPIO5) { 7545 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 7546 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 7547 val = REG_RD(bp, reg_addr); 7548 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 7549 REG_WR(bp, reg_addr, val); 7550 } 7551 7552 return 0; 7553 } 7554 7555 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 7556 { 7557 int reg; 7558 u32 wb_write[2]; 7559 7560 if (CHIP_IS_E1(bp)) 7561 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 7562 else 7563 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 7564 7565 wb_write[0] = ONCHIP_ADDR1(addr); 7566 wb_write[1] = ONCHIP_ADDR2(addr); 7567 REG_WR_DMAE(bp, reg, wb_write, 2); 7568 } 7569 7570 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) 7571 { 7572 u32 data, ctl, cnt = 100; 7573 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 7574 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 7575 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 7576 u32 sb_bit = 1 << (idu_sb_id%32); 7577 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 7578 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 7579 7580 /* Not supported in BC mode */ 7581 if (CHIP_INT_MODE_IS_BC(bp)) 7582 return; 7583 7584 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 7585 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 7586 IGU_REGULAR_CLEANUP_SET | 7587 IGU_REGULAR_BCLEANUP; 7588 7589 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 7590 func_encode << IGU_CTRL_REG_FID_SHIFT | 7591 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 7592 7593 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7594 data, igu_addr_data); 7595 REG_WR(bp, igu_addr_data, data); 7596 mmiowb(); 7597 barrier(); 7598 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7599 ctl, igu_addr_ctl); 7600 REG_WR(bp, igu_addr_ctl, ctl); 7601 mmiowb(); 7602 barrier(); 7603 7604 /* wait for clean up to finish */ 7605 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 7606 msleep(20); 7607 7608 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 7609 DP(NETIF_MSG_HW, 7610 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", 7611 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 7612 } 7613 } 7614 7615 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 7616 { 7617 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7618 } 7619 7620 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7621 { 7622 u32 i, base = FUNC_ILT_BASE(func); 7623 for (i = base; i < base + ILT_PER_FUNC; i++) 7624 bnx2x_ilt_wr(bp, i, 0); 7625 } 7626 7627 static void bnx2x_init_searcher(struct bnx2x *bp) 7628 { 7629 int port = BP_PORT(bp); 7630 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7631 /* T1 hash bits value determines the T1 number of entries */ 7632 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7633 } 7634 7635 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) 7636 { 7637 int rc; 7638 struct bnx2x_func_state_params func_params = {NULL}; 7639 struct bnx2x_func_switch_update_params *switch_update_params = 7640 &func_params.params.switch_update; 7641 7642 /* Prepare 
parameters for function state transitions */ 7643 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 7644 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 7645 7646 func_params.f_obj = &bp->func_obj; 7647 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 7648 7649 /* Function parameters */ 7650 switch_update_params->suspend = suspend; 7651 7652 rc = bnx2x_func_state_change(bp, &func_params); 7653 7654 return rc; 7655 } 7656 7657 static int bnx2x_reset_nic_mode(struct bnx2x *bp) 7658 { 7659 int rc, i, port = BP_PORT(bp); 7660 int vlan_en = 0, mac_en[NUM_MACS]; 7661 7662 /* Close input from network */ 7663 if (bp->mf_mode == SINGLE_FUNCTION) { 7664 bnx2x_set_rx_filter(&bp->link_params, 0); 7665 } else { 7666 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN : 7667 NIG_REG_LLH0_FUNC_EN); 7668 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : 7669 NIG_REG_LLH0_FUNC_EN, 0); 7670 for (i = 0; i < NUM_MACS; i++) { 7671 mac_en[i] = REG_RD(bp, port ? 7672 (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7673 4 * i) : 7674 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 7675 4 * i)); 7676 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7677 4 * i) : 7678 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0); 7679 } 7680 } 7681 7682 /* Close BMC to host */ 7683 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : 7684 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0); 7685 7686 /* Suspend Tx switching to the PF. Completion of this ramrod 7687 * further guarantees that all the packets of that PF / child 7688 * VFs in BRB were processed by the Parser, so it is safe to 7689 * change the NIC_MODE register. 7690 */ 7691 rc = bnx2x_func_switch_update(bp, 1); 7692 if (rc) { 7693 BNX2X_ERR("Can't suspend tx-switching!\n"); 7694 return rc; 7695 } 7696 7697 /* Change NIC_MODE register */ 7698 REG_WR(bp, PRS_REG_NIC_MODE, 0); 7699 7700 /* Open input from network */ 7701 if (bp->mf_mode == SINGLE_FUNCTION) { 7702 bnx2x_set_rx_filter(&bp->link_params, 1); 7703 } else { 7704 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : 7705 NIG_REG_LLH0_FUNC_EN, vlan_en); 7706 for (i = 0; i < NUM_MACS; i++) { 7707 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7708 4 * i) : 7709 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 7710 mac_en[i]); 7711 } 7712 } 7713 7714 /* Enable BMC to host */ 7715 REG_WR(bp, port ? 
NIG_REG_P0_TX_MNG_HOST_ENABLE : 7716 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1); 7717 7718 /* Resume Tx switching to the PF */ 7719 rc = bnx2x_func_switch_update(bp, 0); 7720 if (rc) { 7721 BNX2X_ERR("Can't resume tx-switching!\n"); 7722 return rc; 7723 } 7724 7725 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); 7726 return 0; 7727 } 7728 7729 int bnx2x_init_hw_func_cnic(struct bnx2x *bp) 7730 { 7731 int rc; 7732 7733 bnx2x_ilt_init_op_cnic(bp, INITOP_SET); 7734 7735 if (CONFIGURE_NIC_MODE(bp)) { 7736 /* Configure searcher as part of function hw init */ 7737 bnx2x_init_searcher(bp); 7738 7739 /* Reset NIC mode */ 7740 rc = bnx2x_reset_nic_mode(bp); 7741 if (rc) 7742 BNX2X_ERR("Can't change NIC mode!\n"); 7743 return rc; 7744 } 7745 7746 return 0; 7747 } 7748 7749 static int bnx2x_init_hw_func(struct bnx2x *bp) 7750 { 7751 int port = BP_PORT(bp); 7752 int func = BP_FUNC(bp); 7753 int init_phase = PHASE_PF0 + func; 7754 struct bnx2x_ilt *ilt = BP_ILT(bp); 7755 u16 cdu_ilt_start; 7756 u32 addr, val; 7757 u32 main_mem_base, main_mem_size, main_mem_prty_clr; 7758 int i, main_mem_width, rc; 7759 7760 DP(NETIF_MSG_HW, "starting func init func %d\n", func); 7761 7762 /* FLR cleanup - hmmm */ 7763 if (!CHIP_IS_E1x(bp)) { 7764 rc = bnx2x_pf_flr_clnup(bp); 7765 if (rc) { 7766 bnx2x_fw_dump(bp); 7767 return rc; 7768 } 7769 } 7770 7771 /* set MSI reconfigure capability */ 7772 if (bp->common.int_block == INT_BLOCK_HC) { 7773 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 7774 val = REG_RD(bp, addr); 7775 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 7776 REG_WR(bp, addr, val); 7777 } 7778 7779 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7780 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7781 7782 ilt = BP_ILT(bp); 7783 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7784 7785 if (IS_SRIOV(bp)) 7786 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS; 7787 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); 7788 7789 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes 7790 * those of the VFs, so start line should be reset 7791 */ 7792 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7793 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7794 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; 7795 ilt->lines[cdu_ilt_start + i].page_mapping = 7796 bp->context[i].cxt_mapping; 7797 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; 7798 } 7799 7800 bnx2x_ilt_init_op(bp, INITOP_SET); 7801 7802 if (!CONFIGURE_NIC_MODE(bp)) { 7803 bnx2x_init_searcher(bp); 7804 REG_WR(bp, PRS_REG_NIC_MODE, 0); 7805 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); 7806 } else { 7807 /* Set NIC mode */ 7808 REG_WR(bp, PRS_REG_NIC_MODE, 1); 7809 DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); 7810 } 7811 7812 if (!CHIP_IS_E1x(bp)) { 7813 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 7814 7815 /* Turn on a single ISR mode in IGU if driver is going to use 7816 * INT#x or MSI 7817 */ 7818 if (!(bp->flags & USING_MSIX_FLAG)) 7819 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 7820 /* 7821 * Timers workaround bug: function init part. 
7822 * Need to wait 20msec after initializing ILT, 7823 * needed to make sure there are no requests in 7824 * one of the PXP internal queues with "old" ILT addresses 7825 */ 7826 msleep(20); 7827 /* 7828 * Master enable - Due to WB DMAE writes performed before this 7829 * register is re-initialized as part of the regular function 7830 * init 7831 */ 7832 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7833 /* Enable the function in IGU */ 7834 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); 7835 } 7836 7837 bp->dmae_ready = 1; 7838 7839 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7840 7841 if (!CHIP_IS_E1x(bp)) 7842 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 7843 7844 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7845 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7846 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7847 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7848 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7849 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7850 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7851 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7852 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7853 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7854 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7855 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7856 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7857 7858 if (!CHIP_IS_E1x(bp)) 7859 REG_WR(bp, QM_REG_PF_EN, 1); 7860 7861 if (!CHIP_IS_E1x(bp)) { 7862 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7863 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7864 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7865 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7866 } 7867 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7868 7869 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7870 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7871 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */ 7872 7873 bnx2x_iov_init_dq(bp); 7874 7875 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7876 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7877 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7878 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7879 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7880 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7881 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7882 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7883 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7884 if (!CHIP_IS_E1x(bp)) 7885 REG_WR(bp, PBF_REG_DISABLE_PF, 0); 7886 7887 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7888 7889 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7890 7891 if (!CHIP_IS_E1x(bp)) 7892 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); 7893 7894 if (IS_MF(bp)) { 7895 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7896 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); 7897 } 7898 7899 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7900 7901 /* HC init per function */ 7902 if (bp->common.int_block == INT_BLOCK_HC) { 7903 if (CHIP_IS_E1H(bp)) { 7904 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7905 7906 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7907 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7908 } 7909 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7910 7911 } else { 7912 int num_segs, sb_idx, prod_offset; 7913 7914 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7915 7916 if (!CHIP_IS_E1x(bp)) { 7917 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 7918 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 7919 } 7920 7921 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7922 
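	/* Note: the !CHIP_IS_E1x() block that follows zeroes the IGU
	 * producer/consumer memory for every SB owned by this PF, sends a
	 * consumer update with value 0 (IGU_INT_NOP) and then clears each
	 * SB via bnx2x_igu_clear_sb(); see the producer memory layout
	 * comment below for the E2 vs. E1.5 (BC mode) address mapping.
	 */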
7923 if (!CHIP_IS_E1x(bp)) { 7924 int dsb_idx = 0; 7925 /** 7926 * Producer memory: 7927 * E2 mode: address 0-135 match to the mapping memory; 7928 * 136 - PF0 default prod; 137 - PF1 default prod; 7929 * 138 - PF2 default prod; 139 - PF3 default prod; 7930 * 140 - PF0 attn prod; 141 - PF1 attn prod; 7931 * 142 - PF2 attn prod; 143 - PF3 attn prod; 7932 * 144-147 reserved. 7933 * 7934 * E1.5 mode - In backward compatible mode; 7935 * for non default SB; each even line in the memory 7936 * holds the U producer and each odd line hold 7937 * the C producer. The first 128 producers are for 7938 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 7939 * producers are for the DSB for each PF. 7940 * Each PF has five segments: (the order inside each 7941 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 7942 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 7943 * 144-147 attn prods; 7944 */ 7945 /* non-default-status-blocks */ 7946 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7947 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 7948 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { 7949 prod_offset = (bp->igu_base_sb + sb_idx) * 7950 num_segs; 7951 7952 for (i = 0; i < num_segs; i++) { 7953 addr = IGU_REG_PROD_CONS_MEMORY + 7954 (prod_offset + i) * 4; 7955 REG_WR(bp, addr, 0); 7956 } 7957 /* send consumer update with value 0 */ 7958 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, 7959 USTORM_ID, 0, IGU_INT_NOP, 1); 7960 bnx2x_igu_clear_sb(bp, 7961 bp->igu_base_sb + sb_idx); 7962 } 7963 7964 /* default-status-blocks */ 7965 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7966 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 7967 7968 if (CHIP_MODE_IS_4_PORT(bp)) 7969 dsb_idx = BP_FUNC(bp); 7970 else 7971 dsb_idx = BP_VN(bp); 7972 7973 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 7974 IGU_BC_BASE_DSB_PROD + dsb_idx : 7975 IGU_NORM_BASE_DSB_PROD + dsb_idx); 7976 7977 /* 7978 * igu prods come in chunks of E1HVN_MAX (4) - 7979 * does not matters what is the current chip mode 7980 */ 7981 for (i = 0; i < (num_segs * E1HVN_MAX); 7982 i += E1HVN_MAX) { 7983 addr = IGU_REG_PROD_CONS_MEMORY + 7984 (prod_offset + i)*4; 7985 REG_WR(bp, addr, 0); 7986 } 7987 /* send consumer update with 0 */ 7988 if (CHIP_INT_MODE_IS_BC(bp)) { 7989 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7990 USTORM_ID, 0, IGU_INT_NOP, 1); 7991 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7992 CSTORM_ID, 0, IGU_INT_NOP, 1); 7993 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7994 XSTORM_ID, 0, IGU_INT_NOP, 1); 7995 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7996 TSTORM_ID, 0, IGU_INT_NOP, 1); 7997 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7998 ATTENTION_ID, 0, IGU_INT_NOP, 1); 7999 } else { 8000 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8001 USTORM_ID, 0, IGU_INT_NOP, 1); 8002 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8003 ATTENTION_ID, 0, IGU_INT_NOP, 1); 8004 } 8005 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); 8006 8007 /* !!! 
These should become driver const once 8008 rf-tool supports split-68 const */ 8009 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 8010 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 8011 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 8012 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 8013 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 8014 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 8015 } 8016 } 8017 8018 /* Reset PCIE errors for debug */ 8019 REG_WR(bp, 0x2114, 0xffffffff); 8020 REG_WR(bp, 0x2120, 0xffffffff); 8021 8022 if (CHIP_IS_E1x(bp)) { 8023 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 8024 main_mem_base = HC_REG_MAIN_MEMORY + 8025 BP_PORT(bp) * (main_mem_size * 4); 8026 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 8027 main_mem_width = 8; 8028 8029 val = REG_RD(bp, main_mem_prty_clr); 8030 if (val) 8031 DP(NETIF_MSG_HW, 8032 "Hmmm... Parity errors in HC block during function init (0x%x)!\n", 8033 val); 8034 8035 /* Clear "false" parity errors in MSI-X table */ 8036 for (i = main_mem_base; 8037 i < main_mem_base + main_mem_size * 4; 8038 i += main_mem_width) { 8039 bnx2x_read_dmae(bp, i, main_mem_width / 4); 8040 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), 8041 i, main_mem_width / 4); 8042 } 8043 /* Clear HC parity attention */ 8044 REG_RD(bp, main_mem_prty_clr); 8045 } 8046 8047 #ifdef BNX2X_STOP_ON_ERROR 8048 /* Enable STORMs SP logging */ 8049 REG_WR8(bp, BAR_USTRORM_INTMEM + 8050 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8051 REG_WR8(bp, BAR_TSTRORM_INTMEM + 8052 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8053 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8054 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8055 REG_WR8(bp, BAR_XSTRORM_INTMEM + 8056 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8057 #endif 8058 8059 bnx2x_phy_probe(&bp->link_params); 8060 8061 return 0; 8062 } 8063 8064 void bnx2x_free_mem_cnic(struct bnx2x *bp) 8065 { 8066 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); 8067 8068 if (!CHIP_IS_E1x(bp)) 8069 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, 8070 sizeof(struct host_hc_status_block_e2)); 8071 else 8072 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 8073 sizeof(struct host_hc_status_block_e1x)); 8074 8075 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 8076 } 8077 8078 void bnx2x_free_mem(struct bnx2x *bp) 8079 { 8080 int i; 8081 8082 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 8083 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 8084 8085 if (IS_VF(bp)) 8086 return; 8087 8088 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 8089 sizeof(struct host_sp_status_block)); 8090 8091 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 8092 sizeof(struct bnx2x_slowpath)); 8093 8094 for (i = 0; i < L2_ILT_LINES(bp); i++) 8095 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, 8096 bp->context[i].size); 8097 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 8098 8099 BNX2X_FREE(bp->ilt->lines); 8100 8101 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 8102 8103 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 8104 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8105 8106 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 8107 8108 bnx2x_iov_free_mem(bp); 8109 } 8110 8111 int bnx2x_alloc_mem_cnic(struct bnx2x *bp) 8112 { 8113 if (!CHIP_IS_E1x(bp)) { 8114 /* size = the status block + ramrod buffers */ 8115 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, 8116 sizeof(struct host_hc_status_block_e2)); 8117 if (!bp->cnic_sb.e2_sb) 8118 goto alloc_mem_err; 8119 } else { 8120 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, 8121 
sizeof(struct host_hc_status_block_e1x)); 8122 if (!bp->cnic_sb.e1x_sb) 8123 goto alloc_mem_err; 8124 } 8125 8126 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { 8127 /* allocate searcher T2 table, as it wasn't allocated before */ 8128 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); 8129 if (!bp->t2) 8130 goto alloc_mem_err; 8131 } 8132 8133 /* write address to which L5 should insert its values */ 8134 bp->cnic_eth_dev.addr_drv_info_to_mcp = 8135 &bp->slowpath->drv_info_to_mcp; 8136 8137 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) 8138 goto alloc_mem_err; 8139 8140 return 0; 8141 8142 alloc_mem_err: 8143 bnx2x_free_mem_cnic(bp); 8144 BNX2X_ERR("Can't allocate memory\n"); 8145 return -ENOMEM; 8146 } 8147 8148 int bnx2x_alloc_mem(struct bnx2x *bp) 8149 { 8150 int i, allocated, context_size; 8151 8152 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { 8153 /* allocate searcher T2 table */ 8154 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); 8155 if (!bp->t2) 8156 goto alloc_mem_err; 8157 } 8158 8159 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, 8160 sizeof(struct host_sp_status_block)); 8161 if (!bp->def_status_blk) 8162 goto alloc_mem_err; 8163 8164 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, 8165 sizeof(struct bnx2x_slowpath)); 8166 if (!bp->slowpath) 8167 goto alloc_mem_err; 8168 8169 /* Allocate memory for CDU context: 8170 * This memory is allocated separately and not in the generic ILT 8171 * functions because CDU differs in few aspects: 8172 * 1. There are multiple entities allocating memory for context - 8173 * 'regular' driver, CNIC and SRIOV driver. Each separately controls 8174 * its own ILT lines. 8175 * 2. Since CDU page-size is not a single 4KB page (which is the case 8176 * for the other ILT clients), to be efficient we want to support 8177 * allocation of sub-page-size in the last entry. 8178 * 3. Context pointers are used by the driver to pass to FW / update 8179 * the context (for the other ILT clients the pointers are used just to 8180 * free the memory during unload). 
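	 *
	 * Note: given the above, the allocation loop below simply carves
	 * context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp)
	 * bytes into CDU_ILT_PAGE_SZ-sized pieces, where only the last
	 * piece may be smaller than a full CDU page.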
8181 */ 8182 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 8183 8184 for (i = 0, allocated = 0; allocated < context_size; i++) { 8185 bp->context[i].size = min(CDU_ILT_PAGE_SZ, 8186 (context_size - allocated)); 8187 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, 8188 bp->context[i].size); 8189 if (!bp->context[i].vcxt) 8190 goto alloc_mem_err; 8191 allocated += bp->context[i].size; 8192 } 8193 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), 8194 GFP_KERNEL); 8195 if (!bp->ilt->lines) 8196 goto alloc_mem_err; 8197 8198 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 8199 goto alloc_mem_err; 8200 8201 if (bnx2x_iov_alloc_mem(bp)) 8202 goto alloc_mem_err; 8203 8204 /* Slow path ring */ 8205 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); 8206 if (!bp->spq) 8207 goto alloc_mem_err; 8208 8209 /* EQ */ 8210 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, 8211 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8212 if (!bp->eq_ring) 8213 goto alloc_mem_err; 8214 8215 return 0; 8216 8217 alloc_mem_err: 8218 bnx2x_free_mem(bp); 8219 BNX2X_ERR("Can't allocate memory\n"); 8220 return -ENOMEM; 8221 } 8222 8223 /* 8224 * Init service functions 8225 */ 8226 8227 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, 8228 struct bnx2x_vlan_mac_obj *obj, bool set, 8229 int mac_type, unsigned long *ramrod_flags) 8230 { 8231 int rc; 8232 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 8233 8234 memset(&ramrod_param, 0, sizeof(ramrod_param)); 8235 8236 /* Fill general parameters */ 8237 ramrod_param.vlan_mac_obj = obj; 8238 ramrod_param.ramrod_flags = *ramrod_flags; 8239 8240 /* Fill a user request section if needed */ 8241 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 8242 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 8243 8244 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 8245 8246 /* Set the command: ADD or DEL */ 8247 if (set) 8248 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 8249 else 8250 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; 8251 } 8252 8253 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 8254 8255 if (rc == -EEXIST) { 8256 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); 8257 /* do not treat adding same MAC as error */ 8258 rc = 0; 8259 } else if (rc < 0) 8260 BNX2X_ERR("%s MAC failed\n", (set ? 
"Set" : "Del")); 8261 8262 return rc; 8263 } 8264 8265 int bnx2x_del_all_macs(struct bnx2x *bp, 8266 struct bnx2x_vlan_mac_obj *mac_obj, 8267 int mac_type, bool wait_for_comp) 8268 { 8269 int rc; 8270 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 8271 8272 /* Wait for completion of requested */ 8273 if (wait_for_comp) 8274 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8275 8276 /* Set the mac type of addresses we want to clear */ 8277 __set_bit(mac_type, &vlan_mac_flags); 8278 8279 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); 8280 if (rc < 0) 8281 BNX2X_ERR("Failed to delete MACs: %d\n", rc); 8282 8283 return rc; 8284 } 8285 8286 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) 8287 { 8288 if (is_zero_ether_addr(bp->dev->dev_addr) && 8289 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 8290 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 8291 "Ignoring Zero MAC for STORAGE SD mode\n"); 8292 return 0; 8293 } 8294 8295 if (IS_PF(bp)) { 8296 unsigned long ramrod_flags = 0; 8297 8298 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 8299 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8300 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, 8301 &bp->sp_objs->mac_obj, set, 8302 BNX2X_ETH_MAC, &ramrod_flags); 8303 } else { /* vf */ 8304 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, 8305 bp->fp->index, true); 8306 } 8307 } 8308 8309 int bnx2x_setup_leading(struct bnx2x *bp) 8310 { 8311 if (IS_PF(bp)) 8312 return bnx2x_setup_queue(bp, &bp->fp[0], true); 8313 else /* VF */ 8314 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); 8315 } 8316 8317 /** 8318 * bnx2x_set_int_mode - configure interrupt mode 8319 * 8320 * @bp: driver handle 8321 * 8322 * In case of MSI-X it will also try to enable MSI-X. 8323 */ 8324 int bnx2x_set_int_mode(struct bnx2x *bp) 8325 { 8326 int rc = 0; 8327 8328 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { 8329 BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); 8330 return -EINVAL; 8331 } 8332 8333 switch (int_mode) { 8334 case BNX2X_INT_MODE_MSIX: 8335 /* attempt to enable msix */ 8336 rc = bnx2x_enable_msix(bp); 8337 8338 /* msix attained */ 8339 if (!rc) 8340 return 0; 8341 8342 /* vfs use only msix */ 8343 if (rc && IS_VF(bp)) 8344 return rc; 8345 8346 /* failed to enable multiple MSI-X */ 8347 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 8348 bp->num_queues, 8349 1 + bp->num_cnic_queues); 8350 8351 /* falling through... */ 8352 case BNX2X_INT_MODE_MSI: 8353 bnx2x_enable_msi(bp); 8354 8355 /* falling through... 
*/ 8356 case BNX2X_INT_MODE_INTX: 8357 bp->num_ethernet_queues = 1; 8358 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 8359 BNX2X_DEV_INFO("set number of queues to 1\n"); 8360 break; 8361 default: 8362 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n"); 8363 return -EINVAL; 8364 } 8365 return 0; 8366 } 8367 8368 /* must be called prior to any HW initializations */ 8369 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) 8370 { 8371 if (IS_SRIOV(bp)) 8372 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS; 8373 return L2_ILT_LINES(bp); 8374 } 8375 8376 void bnx2x_ilt_set_info(struct bnx2x *bp) 8377 { 8378 struct ilt_client_info *ilt_client; 8379 struct bnx2x_ilt *ilt = BP_ILT(bp); 8380 u16 line = 0; 8381 8382 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); 8383 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); 8384 8385 /* CDU */ 8386 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 8387 ilt_client->client_num = ILT_CLIENT_CDU; 8388 ilt_client->page_size = CDU_ILT_PAGE_SZ; 8389 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 8390 ilt_client->start = line; 8391 line += bnx2x_cid_ilt_lines(bp); 8392 8393 if (CNIC_SUPPORT(bp)) 8394 line += CNIC_ILT_LINES; 8395 ilt_client->end = line - 1; 8396 8397 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8398 ilt_client->start, 8399 ilt_client->end, 8400 ilt_client->page_size, 8401 ilt_client->flags, 8402 ilog2(ilt_client->page_size >> 12)); 8403 8404 /* QM */ 8405 if (QM_INIT(bp->qm_cid_count)) { 8406 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 8407 ilt_client->client_num = ILT_CLIENT_QM; 8408 ilt_client->page_size = QM_ILT_PAGE_SZ; 8409 ilt_client->flags = 0; 8410 ilt_client->start = line; 8411 8412 /* 4 bytes for each cid */ 8413 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 8414 QM_ILT_PAGE_SZ); 8415 8416 ilt_client->end = line - 1; 8417 8418 DP(NETIF_MSG_IFUP, 8419 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8420 ilt_client->start, 8421 ilt_client->end, 8422 ilt_client->page_size, 8423 ilt_client->flags, 8424 ilog2(ilt_client->page_size >> 12)); 8425 } 8426 8427 if (CNIC_SUPPORT(bp)) { 8428 /* SRC */ 8429 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 8430 ilt_client->client_num = ILT_CLIENT_SRC; 8431 ilt_client->page_size = SRC_ILT_PAGE_SZ; 8432 ilt_client->flags = 0; 8433 ilt_client->start = line; 8434 line += SRC_ILT_LINES; 8435 ilt_client->end = line - 1; 8436 8437 DP(NETIF_MSG_IFUP, 8438 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8439 ilt_client->start, 8440 ilt_client->end, 8441 ilt_client->page_size, 8442 ilt_client->flags, 8443 ilog2(ilt_client->page_size >> 12)); 8444 8445 /* TM */ 8446 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 8447 ilt_client->client_num = ILT_CLIENT_TM; 8448 ilt_client->page_size = TM_ILT_PAGE_SZ; 8449 ilt_client->flags = 0; 8450 ilt_client->start = line; 8451 line += TM_ILT_LINES; 8452 ilt_client->end = line - 1; 8453 8454 DP(NETIF_MSG_IFUP, 8455 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8456 ilt_client->start, 8457 ilt_client->end, 8458 ilt_client->page_size, 8459 ilt_client->flags, 8460 ilog2(ilt_client->page_size >> 12)); 8461 } 8462 8463 BUG_ON(line > ILT_MAX_LINES); 8464 } 8465 8466 /** 8467 * bnx2x_pf_q_prep_init - prepare INIT transition parameters 8468 * 8469 * @bp: driver handle 8470 * @fp: pointer to fastpath 8471 * @init_params: pointer to parameters structure 8472 * 8473 * parameters configured: 8474 * - HC configuration 8475 * - 
Queue's CDU context 8476 */ 8477 static void bnx2x_pf_q_prep_init(struct bnx2x *bp, 8478 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 8479 { 8480 u8 cos; 8481 int cxt_index, cxt_offset; 8482 8483 /* FCoE Queue uses Default SB, thus has no HC capabilities */ 8484 if (!IS_FCOE_FP(fp)) { 8485 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 8486 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); 8487 8488 /* If HC is supported, enable host coalescing in the transition 8489 * to INIT state. 8490 */ 8491 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); 8492 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); 8493 8494 /* HC rate */ 8495 init_params->rx.hc_rate = bp->rx_ticks ? 8496 (1000000 / bp->rx_ticks) : 0; 8497 init_params->tx.hc_rate = bp->tx_ticks ? 8498 (1000000 / bp->tx_ticks) : 0; 8499 8500 /* FW SB ID */ 8501 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = 8502 fp->fw_sb_id; 8503 8504 /* 8505 * CQ index among the SB indices: FCoE clients uses the default 8506 * SB, therefore it's different. 8507 */ 8508 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 8509 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 8510 } 8511 8512 /* set maximum number of COSs supported by this queue */ 8513 init_params->max_cos = fp->max_cos; 8514 8515 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", 8516 fp->index, init_params->max_cos); 8517 8518 /* set the context pointers queue object */ 8519 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 8520 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; 8521 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * 8522 ILT_PAGE_CIDS); 8523 init_params->cxts[cos] = 8524 &bp->context[cxt_index].vcxt[cxt_offset].eth; 8525 } 8526 } 8527 8528 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 8529 struct bnx2x_queue_state_params *q_params, 8530 struct bnx2x_queue_setup_tx_only_params *tx_only_params, 8531 int tx_index, bool leading) 8532 { 8533 memset(tx_only_params, 0, sizeof(*tx_only_params)); 8534 8535 /* Set the command */ 8536 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; 8537 8538 /* Set tx-only QUEUE flags: don't zero statistics */ 8539 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); 8540 8541 /* choose the index of the cid to send the slow path on */ 8542 tx_only_params->cid_index = tx_index; 8543 8544 /* Set general TX_ONLY_SETUP parameters */ 8545 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); 8546 8547 /* Set Tx TX_ONLY_SETUP parameters */ 8548 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); 8549 8550 DP(NETIF_MSG_IFUP, 8551 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", 8552 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], 8553 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, 8554 tx_only_params->gen_params.spcl_id, tx_only_params->flags); 8555 8556 /* send the ramrod */ 8557 return bnx2x_queue_state_change(bp, q_params); 8558 } 8559 8560 /** 8561 * bnx2x_setup_queue - setup queue 8562 * 8563 * @bp: driver handle 8564 * @fp: pointer to fastpath 8565 * @leading: is leading 8566 * 8567 * This function performs 2 steps in a Queue state machine 8568 * actually: 1) RESET->INIT 2) INIT->SETUP 8569 */ 8570 8571 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, 8572 bool leading) 8573 { 8574 struct bnx2x_queue_state_params q_params = {NULL}; 8575 struct bnx2x_queue_setup_params 
*setup_params = 8576 &q_params.params.setup; 8577 struct bnx2x_queue_setup_tx_only_params *tx_only_params = 8578 &q_params.params.tx_only; 8579 int rc; 8580 u8 tx_index; 8581 8582 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); 8583 8584 /* reset IGU state skip FCoE L2 queue */ 8585 if (!IS_FCOE_FP(fp)) 8586 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 8587 IGU_INT_ENABLE, 0); 8588 8589 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 8590 /* We want to wait for completion in this context */ 8591 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 8592 8593 /* Prepare the INIT parameters */ 8594 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); 8595 8596 /* Set the command */ 8597 q_params.cmd = BNX2X_Q_CMD_INIT; 8598 8599 /* Change the state to INIT */ 8600 rc = bnx2x_queue_state_change(bp, &q_params); 8601 if (rc) { 8602 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); 8603 return rc; 8604 } 8605 8606 DP(NETIF_MSG_IFUP, "init complete\n"); 8607 8608 /* Now move the Queue to the SETUP state... */ 8609 memset(setup_params, 0, sizeof(*setup_params)); 8610 8611 /* Set QUEUE flags */ 8612 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); 8613 8614 /* Set general SETUP parameters */ 8615 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, 8616 FIRST_TX_COS_INDEX); 8617 8618 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, 8619 &setup_params->rxq_params); 8620 8621 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, 8622 FIRST_TX_COS_INDEX); 8623 8624 /* Set the command */ 8625 q_params.cmd = BNX2X_Q_CMD_SETUP; 8626 8627 if (IS_FCOE_FP(fp)) 8628 bp->fcoe_init = true; 8629 8630 /* Change the state to SETUP */ 8631 rc = bnx2x_queue_state_change(bp, &q_params); 8632 if (rc) { 8633 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); 8634 return rc; 8635 } 8636 8637 /* loop through the relevant tx-only indices */ 8638 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 8639 tx_index < fp->max_cos; 8640 tx_index++) { 8641 8642 /* prepare and send tx-only ramrod*/ 8643 rc = bnx2x_setup_tx_only(bp, fp, &q_params, 8644 tx_only_params, tx_index, leading); 8645 if (rc) { 8646 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", 8647 fp->index, tx_index); 8648 return rc; 8649 } 8650 } 8651 8652 return rc; 8653 } 8654 8655 static int bnx2x_stop_queue(struct bnx2x *bp, int index) 8656 { 8657 struct bnx2x_fastpath *fp = &bp->fp[index]; 8658 struct bnx2x_fp_txdata *txdata; 8659 struct bnx2x_queue_state_params q_params = {NULL}; 8660 int rc, tx_index; 8661 8662 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); 8663 8664 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 8665 /* We want to wait for completion in this context */ 8666 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 8667 8668 /* close tx-only connections */ 8669 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 8670 tx_index < fp->max_cos; 8671 tx_index++){ 8672 8673 /* ascertain this is a normal queue*/ 8674 txdata = fp->txdata_ptr[tx_index]; 8675 8676 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", 8677 txdata->txq_index); 8678 8679 /* send halt terminate on tx-only connection */ 8680 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8681 memset(&q_params.params.terminate, 0, 8682 sizeof(q_params.params.terminate)); 8683 q_params.params.terminate.cid_index = tx_index; 8684 8685 rc = bnx2x_queue_state_change(bp, &q_params); 8686 if (rc) 8687 return rc; 8688 8689 /* send halt terminate on tx-only connection */ 8690 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8691 memset(&q_params.params.cfc_del, 0, 8692 sizeof(q_params.params.cfc_del)); 
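		/* The CFC_DEL ramrod deletes the CFC entry of this tx-only
		 * cid; each tx-only cos index gets its own TERMINATE +
		 * CFC_DEL pair here before the primary connection is
		 * halted, terminated and deleted below.
		 */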
8693 q_params.params.cfc_del.cid_index = tx_index; 8694 rc = bnx2x_queue_state_change(bp, &q_params); 8695 if (rc) 8696 return rc; 8697 } 8698 /* Stop the primary connection: */ 8699 /* ...halt the connection */ 8700 q_params.cmd = BNX2X_Q_CMD_HALT; 8701 rc = bnx2x_queue_state_change(bp, &q_params); 8702 if (rc) 8703 return rc; 8704 8705 /* ...terminate the connection */ 8706 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8707 memset(&q_params.params.terminate, 0, 8708 sizeof(q_params.params.terminate)); 8709 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 8710 rc = bnx2x_queue_state_change(bp, &q_params); 8711 if (rc) 8712 return rc; 8713 /* ...delete cfc entry */ 8714 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8715 memset(&q_params.params.cfc_del, 0, 8716 sizeof(q_params.params.cfc_del)); 8717 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 8718 return bnx2x_queue_state_change(bp, &q_params); 8719 } 8720 8721 static void bnx2x_reset_func(struct bnx2x *bp) 8722 { 8723 int port = BP_PORT(bp); 8724 int func = BP_FUNC(bp); 8725 int i; 8726 8727 /* Disable the function in the FW */ 8728 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 8729 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 8730 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 8731 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 8732 8733 /* FP SBs */ 8734 for_each_eth_queue(bp, i) { 8735 struct bnx2x_fastpath *fp = &bp->fp[i]; 8736 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8737 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 8738 SB_DISABLED); 8739 } 8740 8741 if (CNIC_LOADED(bp)) 8742 /* CNIC SB */ 8743 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8744 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 8745 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED); 8746 8747 /* SP SB */ 8748 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8749 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 8750 SB_DISABLED); 8751 8752 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 8753 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 8754 0); 8755 8756 /* Configure IGU */ 8757 if (bp->common.int_block == INT_BLOCK_HC) { 8758 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 8759 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 8760 } else { 8761 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 8762 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8763 } 8764 8765 if (CNIC_LOADED(bp)) { 8766 /* Disable Timer scan */ 8767 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 8768 /* 8769 * Wait for at least 10ms and up to 2 second for the timers 8770 * scan to complete 8771 */ 8772 for (i = 0; i < 200; i++) { 8773 usleep_range(10000, 20000); 8774 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8775 break; 8776 } 8777 } 8778 /* Clear ILT */ 8779 bnx2x_clear_func_ilt(bp, func); 8780 8781 /* Timers workaround bug for E2: if this is vnic-3, 8782 * we need to set the entire ilt range for this timers. 
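	 * This is done below using a dummy ILT_CLIENT_TM client spanning
	 * lines 0 .. ILT_NUM_PAGE_ENTRIES - 1, programmed through
	 * bnx2x_ilt_boundry_init_op(..., INITOP_CLEAR).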
8783 */ 8784 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { 8785 struct ilt_client_info ilt_cli; 8786 /* use dummy TM client */ 8787 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 8788 ilt_cli.start = 0; 8789 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 8790 ilt_cli.client_num = ILT_CLIENT_TM; 8791 8792 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); 8793 } 8794 8795 /* this assumes that reset_port() called before reset_func()*/ 8796 if (!CHIP_IS_E1x(bp)) 8797 bnx2x_pf_disable(bp); 8798 8799 bp->dmae_ready = 0; 8800 } 8801 8802 static void bnx2x_reset_port(struct bnx2x *bp) 8803 { 8804 int port = BP_PORT(bp); 8805 u32 val; 8806 8807 /* Reset physical Link */ 8808 bnx2x__link_reset(bp); 8809 8810 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 8811 8812 /* Do not rcv packets to BRB */ 8813 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 8814 /* Do not direct rcv packets that are not for MCP to the BRB */ 8815 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 8816 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 8817 8818 /* Configure AEU */ 8819 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 8820 8821 msleep(100); 8822 /* Check for BRB port occupancy */ 8823 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 8824 if (val) 8825 DP(NETIF_MSG_IFDOWN, 8826 "BRB1 is not empty %d blocks are occupied\n", val); 8827 8828 /* TODO: Close Doorbell port? */ 8829 } 8830 8831 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 8832 { 8833 struct bnx2x_func_state_params func_params = {NULL}; 8834 8835 /* Prepare parameters for function state transitions */ 8836 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8837 8838 func_params.f_obj = &bp->func_obj; 8839 func_params.cmd = BNX2X_F_CMD_HW_RESET; 8840 8841 func_params.params.hw_init.load_phase = load_code; 8842 8843 return bnx2x_func_state_change(bp, &func_params); 8844 } 8845 8846 static int bnx2x_func_stop(struct bnx2x *bp) 8847 { 8848 struct bnx2x_func_state_params func_params = {NULL}; 8849 int rc; 8850 8851 /* Prepare parameters for function state transitions */ 8852 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8853 func_params.f_obj = &bp->func_obj; 8854 func_params.cmd = BNX2X_F_CMD_STOP; 8855 8856 /* 8857 * Try to stop the function the 'good way'. If fails (in case 8858 * of a parity error during bnx2x_chip_cleanup()) and we are 8859 * not in a debug mode, perform a state transaction in order to 8860 * enable further HW_RESET transaction. 8861 */ 8862 rc = bnx2x_func_state_change(bp, &func_params); 8863 if (rc) { 8864 #ifdef BNX2X_STOP_ON_ERROR 8865 return rc; 8866 #else 8867 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); 8868 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 8869 return bnx2x_func_state_change(bp, &func_params); 8870 #endif 8871 } 8872 8873 return 0; 8874 } 8875 8876 /** 8877 * bnx2x_send_unload_req - request unload mode from the MCP. 8878 * 8879 * @bp: driver handle 8880 * @unload_mode: requested function's unload mode 8881 * 8882 * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 8883 */ 8884 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) 8885 { 8886 u32 reset_code = 0; 8887 int port = BP_PORT(bp); 8888 8889 /* Select the UNLOAD request mode */ 8890 if (unload_mode == UNLOAD_NORMAL) 8891 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8892 8893 else if (bp->flags & NO_WOL_FLAG) 8894 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 8895 8896 else if (bp->wol) { 8897 u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; 8898 u8 *mac_addr = bp->dev->dev_addr; 8899 struct pci_dev *pdev = bp->pdev; 8900 u32 val; 8901 u16 pmc; 8902 8903 /* The mac address is written to entries 1-4 to 8904 * preserve entry 0 which is used by the PMF 8905 */ 8906 u8 entry = (BP_VN(bp) + 1)*8; 8907 8908 val = (mac_addr[0] << 8) | mac_addr[1]; 8909 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 8910 8911 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 8912 (mac_addr[4] << 8) | mac_addr[5]; 8913 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 8914 8915 /* Enable the PME and clear the status */ 8916 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); 8917 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 8918 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); 8919 8920 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 8921 8922 } else 8923 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8924 8925 /* Send the request to the MCP */ 8926 if (!BP_NOMCP(bp)) 8927 reset_code = bnx2x_fw_command(bp, reset_code, 0); 8928 else { 8929 int path = BP_PATH(bp); 8930 8931 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", 8932 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 8933 bnx2x_load_count[path][2]); 8934 bnx2x_load_count[path][0]--; 8935 bnx2x_load_count[path][1 + port]--; 8936 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", 8937 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 8938 bnx2x_load_count[path][2]); 8939 if (bnx2x_load_count[path][0] == 0) 8940 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 8941 else if (bnx2x_load_count[path][1 + port] == 0) 8942 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 8943 else 8944 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 8945 } 8946 8947 return reset_code; 8948 } 8949 8950 /** 8951 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 8952 * 8953 * @bp: driver handle 8954 * @keep_link: true iff link should be kept up 8955 */ 8956 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) 8957 { 8958 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 8959 8960 /* Report UNLOAD_DONE to MCP */ 8961 if (!BP_NOMCP(bp)) 8962 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 8963 } 8964 8965 static int bnx2x_func_wait_started(struct bnx2x *bp) 8966 { 8967 int tout = 50; 8968 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 8969 8970 if (!bp->port.pmf) 8971 return 0; 8972 8973 /* 8974 * (assumption: No Attention from MCP at this stage) 8975 * PMF probably in the middle of TX disable/enable transaction 8976 * 1. Sync IRS for default SB 8977 * 2. Sync SP queue - this guarantees us that attention handling started 8978 * 3. Wait, that TX disable/enable transaction completes 8979 * 8980 * 1+2 guarantee that if DCBx attention was scheduled it already changed 8981 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 8982 * received completion for the transaction the state is TX_STOPPED. 8983 * State will return to STARTED after completion of TX_STOPPED-->STARTED 8984 * transaction. 
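	 *
	 * The polling loop below bounds this wait to roughly
	 * 50 * 20ms = 1 second; if the function is still not back in
	 * STARTED state it either fails (BNX2X_STOP_ON_ERROR) or forces
	 * the STARTED-->TX_ST0PPED-->STARTED sequence with the
	 * RAMROD_DRV_CLR_ONLY flag set.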
8985 */ 8986 8987 /* make sure default SB ISR is done */ 8988 if (msix) 8989 synchronize_irq(bp->msix_table[0].vector); 8990 else 8991 synchronize_irq(bp->pdev->irq); 8992 8993 flush_workqueue(bnx2x_wq); 8994 flush_workqueue(bnx2x_iov_wq); 8995 8996 while (bnx2x_func_get_state(bp, &bp->func_obj) != 8997 BNX2X_F_STATE_STARTED && tout--) 8998 msleep(20); 8999 9000 if (bnx2x_func_get_state(bp, &bp->func_obj) != 9001 BNX2X_F_STATE_STARTED) { 9002 #ifdef BNX2X_STOP_ON_ERROR 9003 BNX2X_ERR("Wrong function state\n"); 9004 return -EBUSY; 9005 #else 9006 /* 9007 * Failed to complete the transaction in a "good way" 9008 * Force both transactions with CLR bit 9009 */ 9010 struct bnx2x_func_state_params func_params = {NULL}; 9011 9012 DP(NETIF_MSG_IFDOWN, 9013 "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); 9014 9015 func_params.f_obj = &bp->func_obj; 9016 __set_bit(RAMROD_DRV_CLR_ONLY, 9017 &func_params.ramrod_flags); 9018 9019 /* STARTED-->TX_ST0PPED */ 9020 func_params.cmd = BNX2X_F_CMD_TX_STOP; 9021 bnx2x_func_state_change(bp, &func_params); 9022 9023 /* TX_ST0PPED-->STARTED */ 9024 func_params.cmd = BNX2X_F_CMD_TX_START; 9025 return bnx2x_func_state_change(bp, &func_params); 9026 #endif 9027 } 9028 9029 return 0; 9030 } 9031 9032 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) 9033 { 9034 int port = BP_PORT(bp); 9035 int i, rc = 0; 9036 u8 cos; 9037 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 9038 u32 reset_code; 9039 9040 /* Wait until tx fastpath tasks complete */ 9041 for_each_tx_queue(bp, i) { 9042 struct bnx2x_fastpath *fp = &bp->fp[i]; 9043 9044 for_each_cos_in_tx_queue(fp, cos) 9045 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); 9046 #ifdef BNX2X_STOP_ON_ERROR 9047 if (rc) 9048 return; 9049 #endif 9050 } 9051 9052 /* Give HW time to discard old tx messages */ 9053 usleep_range(1000, 2000); 9054 9055 /* Clean all ETH MACs */ 9056 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, 9057 false); 9058 if (rc < 0) 9059 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); 9060 9061 /* Clean up UC list */ 9062 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, 9063 true); 9064 if (rc < 0) 9065 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 9066 rc); 9067 9068 /* Disable LLH */ 9069 if (!CHIP_IS_E1(bp)) 9070 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 9071 9072 /* Set "drop all" (stop Rx). 9073 * We need to take a netif_addr_lock() here in order to prevent 9074 * a race between the completion code and this code. 9075 */ 9076 netif_addr_lock_bh(bp->dev); 9077 /* Schedule the rx_mode command */ 9078 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 9079 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 9080 else 9081 bnx2x_set_storm_rx_mode(bp); 9082 9083 /* Cleanup multicast configuration */ 9084 rparam.mcast_obj = &bp->mcast_obj; 9085 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 9086 if (rc < 0) 9087 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); 9088 9089 netif_addr_unlock_bh(bp->dev); 9090 9091 bnx2x_iov_chip_cleanup(bp); 9092 9093 /* 9094 * Send the UNLOAD_REQUEST to the MCP. This will return if 9095 * this function should perform FUNC, PORT or COMMON HW 9096 * reset. 
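	 * (reset_code is expected to be one of FW_MSG_CODE_DRV_UNLOAD_COMMON,
	 * FW_MSG_CODE_DRV_UNLOAD_PORT or FW_MSG_CODE_DRV_UNLOAD_FUNCTION,
	 * just like the no-MCP fallback in bnx2x_send_unload_req() above.)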
9097 */ 9098 reset_code = bnx2x_send_unload_req(bp, unload_mode); 9099 9100 /* 9101 * (assumption: No Attention from MCP at this stage) 9102 * PMF probably in the middle of TX disable/enable transaction 9103 */ 9104 rc = bnx2x_func_wait_started(bp); 9105 if (rc) { 9106 BNX2X_ERR("bnx2x_func_wait_started failed\n"); 9107 #ifdef BNX2X_STOP_ON_ERROR 9108 return; 9109 #endif 9110 } 9111 9112 /* Close multi and leading connections 9113 * Completions for ramrods are collected in a synchronous way 9114 */ 9115 for_each_eth_queue(bp, i) 9116 if (bnx2x_stop_queue(bp, i)) 9117 #ifdef BNX2X_STOP_ON_ERROR 9118 return; 9119 #else 9120 goto unload_error; 9121 #endif 9122 9123 if (CNIC_LOADED(bp)) { 9124 for_each_cnic_queue(bp, i) 9125 if (bnx2x_stop_queue(bp, i)) 9126 #ifdef BNX2X_STOP_ON_ERROR 9127 return; 9128 #else 9129 goto unload_error; 9130 #endif 9131 } 9132 9133 /* If SP settings didn't get completed so far - something 9134 * very wrong has happen. 9135 */ 9136 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) 9137 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); 9138 9139 #ifndef BNX2X_STOP_ON_ERROR 9140 unload_error: 9141 #endif 9142 rc = bnx2x_func_stop(bp); 9143 if (rc) { 9144 BNX2X_ERR("Function stop failed!\n"); 9145 #ifdef BNX2X_STOP_ON_ERROR 9146 return; 9147 #endif 9148 } 9149 9150 /* Disable HW interrupts, NAPI */ 9151 bnx2x_netif_stop(bp, 1); 9152 /* Delete all NAPI objects */ 9153 bnx2x_del_all_napi(bp); 9154 if (CNIC_LOADED(bp)) 9155 bnx2x_del_all_napi_cnic(bp); 9156 9157 /* Release IRQs */ 9158 bnx2x_free_irq(bp); 9159 9160 /* Reset the chip */ 9161 rc = bnx2x_reset_hw(bp, reset_code); 9162 if (rc) 9163 BNX2X_ERR("HW_RESET failed\n"); 9164 9165 /* Report UNLOAD_DONE to MCP */ 9166 bnx2x_send_unload_done(bp, keep_link); 9167 } 9168 9169 void bnx2x_disable_close_the_gate(struct bnx2x *bp) 9170 { 9171 u32 val; 9172 9173 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); 9174 9175 if (CHIP_IS_E1(bp)) { 9176 int port = BP_PORT(bp); 9177 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 9178 MISC_REG_AEU_MASK_ATTN_FUNC_0; 9179 9180 val = REG_RD(bp, addr); 9181 val &= ~(0x300); 9182 REG_WR(bp, addr, val); 9183 } else { 9184 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); 9185 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 9186 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 9187 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); 9188 } 9189 } 9190 9191 /* Close gates #2, #3 and #4: */ 9192 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 9193 { 9194 u32 val; 9195 9196 /* Gates #2 and #4a are closed/opened for "not E1" only */ 9197 if (!CHIP_IS_E1(bp)) { 9198 /* #4 */ 9199 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 9200 /* #2 */ 9201 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 9202 } 9203 9204 /* #3 */ 9205 if (CHIP_IS_E1x(bp)) { 9206 /* Prevent interrupts from HC on both ports */ 9207 val = REG_RD(bp, HC_REG_CONFIG_1); 9208 REG_WR(bp, HC_REG_CONFIG_1, 9209 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 9210 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 9211 9212 val = REG_RD(bp, HC_REG_CONFIG_0); 9213 REG_WR(bp, HC_REG_CONFIG_0, 9214 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 9215 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 9216 } else { 9217 /* Prevent incoming interrupts in IGU */ 9218 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9219 9220 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, 9221 (!close) ? 
9222 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 9223 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 9224 } 9225 9226 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", 9227 close ? "closing" : "opening"); 9228 mmiowb(); 9229 } 9230 9231 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ 9232 9233 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) 9234 { 9235 /* Do some magic... */ 9236 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 9237 *magic_val = val & SHARED_MF_CLP_MAGIC; 9238 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 9239 } 9240 9241 /** 9242 * bnx2x_clp_reset_done - restore the value of the `magic' bit. 9243 * 9244 * @bp: driver handle 9245 * @magic_val: old value of the `magic' bit. 9246 */ 9247 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 9248 { 9249 /* Restore the `magic' bit value... */ 9250 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 9251 MF_CFG_WR(bp, shared_mf_config.clp_mb, 9252 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 9253 } 9254 9255 /** 9256 * bnx2x_reset_mcp_prep - prepare for MCP reset. 9257 * 9258 * @bp: driver handle 9259 * @magic_val: old value of 'magic' bit. 9260 * 9261 * Takes care of CLP configurations. 9262 */ 9263 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 9264 { 9265 u32 shmem; 9266 u32 validity_offset; 9267 9268 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); 9269 9270 /* Set `magic' bit in order to save MF config */ 9271 if (!CHIP_IS_E1(bp)) 9272 bnx2x_clp_reset_prep(bp, magic_val); 9273 9274 /* Get shmem offset */ 9275 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9276 validity_offset = 9277 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); 9278 9279 /* Clear validity map flags */ 9280 if (shmem > 0) 9281 REG_WR(bp, shmem + validity_offset, 0); 9282 } 9283 9284 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 9285 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 9286 9287 /** 9288 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT 9289 * 9290 * @bp: driver handle 9291 */ 9292 static void bnx2x_mcp_wait_one(struct bnx2x *bp) 9293 { 9294 /* special handling for emulation and FPGA, 9295 wait 10 times longer */ 9296 if (CHIP_REV_IS_SLOW(bp)) 9297 msleep(MCP_ONE_TIMEOUT*10); 9298 else 9299 msleep(MCP_ONE_TIMEOUT); 9300 } 9301 9302 /* 9303 * initializes bp->common.shmem_base and waits for validity signature to appear 9304 */ 9305 static int bnx2x_init_shmem(struct bnx2x *bp) 9306 { 9307 int cnt = 0; 9308 u32 val = 0; 9309 9310 do { 9311 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9312 if (bp->common.shmem_base) { 9313 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9314 if (val & SHR_MEM_VALIDITY_MB) 9315 return 0; 9316 } 9317 9318 bnx2x_mcp_wait_one(bp); 9319 9320 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 9321 9322 BNX2X_ERR("BAD MCP validity signature\n"); 9323 9324 return -ENODEV; 9325 } 9326 9327 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 9328 { 9329 int rc = bnx2x_init_shmem(bp); 9330 9331 /* Restore the `magic' bit value */ 9332 if (!CHIP_IS_E1(bp)) 9333 bnx2x_clp_reset_done(bp, magic_val); 9334 9335 return rc; 9336 } 9337 9338 static void bnx2x_pxp_prep(struct bnx2x *bp) 9339 { 9340 if (!CHIP_IS_E1(bp)) { 9341 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); 9342 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); 9343 mmiowb(); 9344 } 9345 } 9346 9347 /* 9348 * Reset the whole chip except for: 9349 * - PCIE core 9350 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by 9351 * one reset bit) 9352 * - IGU 
9353 * - MISC (including AEU) 9354 * - GRC 9355 * - RBCN, RBCP 9356 */ 9357 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) 9358 { 9359 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 9360 u32 global_bits2, stay_reset2; 9361 9362 /* 9363 * Bits that have to be set in reset_mask2 if we want to reset 'global' 9364 * (per chip) blocks. 9365 */ 9366 global_bits2 = 9367 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 9368 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 9369 9370 /* Don't reset the following blocks. 9371 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 9372 * reset, as in 4 port device they might still be owned 9373 * by the MCP (there is only one leader per path). 9374 */ 9375 not_reset_mask1 = 9376 MISC_REGISTERS_RESET_REG_1_RST_HC | 9377 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 9378 MISC_REGISTERS_RESET_REG_1_RST_PXP; 9379 9380 not_reset_mask2 = 9381 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 9382 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 9383 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 9384 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 9385 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 9386 MISC_REGISTERS_RESET_REG_2_RST_GRC | 9387 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 9388 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 9389 MISC_REGISTERS_RESET_REG_2_RST_ATC | 9390 MISC_REGISTERS_RESET_REG_2_PGLC | 9391 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 9392 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 9393 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 9394 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 9395 MISC_REGISTERS_RESET_REG_2_UMAC0 | 9396 MISC_REGISTERS_RESET_REG_2_UMAC1; 9397 9398 /* 9399 * Keep the following blocks in reset: 9400 * - all xxMACs are handled by the bnx2x_link code. 9401 */ 9402 stay_reset2 = 9403 MISC_REGISTERS_RESET_REG_2_XMAC | 9404 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 9405 9406 /* Full reset masks according to the chip */ 9407 reset_mask1 = 0xffffffff; 9408 9409 if (CHIP_IS_E1(bp)) 9410 reset_mask2 = 0xffff; 9411 else if (CHIP_IS_E1H(bp)) 9412 reset_mask2 = 0x1ffff; 9413 else if (CHIP_IS_E2(bp)) 9414 reset_mask2 = 0xfffff; 9415 else /* CHIP_IS_E3 */ 9416 reset_mask2 = 0x3ffffff; 9417 9418 /* Don't reset global blocks unless we need to */ 9419 if (!global) 9420 reset_mask2 &= ~global_bits2; 9421 9422 /* 9423 * In case of attention in the QM, we need to reset PXP 9424 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 9425 * because otherwise QM reset would release 'close the gates' shortly 9426 * before resetting the PXP, then the PSWRQ would send a write 9427 * request to PGLUE. Then when PXP is reset, PGLUE would try to 9428 * read the payload data from PSWWR, but PSWWR would not 9429 * respond. The write queue in PGLUE would stuck, dmae commands 9430 * would not return. Therefore it's important to reset the second 9431 * reset register (containing the 9432 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 9433 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 9434 * bit). 
9435 */ 9436 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 9437 reset_mask2 & (~not_reset_mask2)); 9438 9439 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 9440 reset_mask1 & (~not_reset_mask1)); 9441 9442 barrier(); 9443 mmiowb(); 9444 9445 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 9446 reset_mask2 & (~stay_reset2)); 9447 9448 barrier(); 9449 mmiowb(); 9450 9451 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 9452 mmiowb(); 9453 } 9454 9455 /** 9456 * bnx2x_er_poll_igu_vq - poll for pending writes bit. 9457 * It should get cleared in no more than 1s. 9458 * 9459 * @bp: driver handle 9460 * 9461 * It should get cleared in no more than 1s. Returns 0 if 9462 * pending writes bit gets cleared. 9463 */ 9464 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) 9465 { 9466 u32 cnt = 1000; 9467 u32 pend_bits = 0; 9468 9469 do { 9470 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); 9471 9472 if (pend_bits == 0) 9473 break; 9474 9475 usleep_range(1000, 2000); 9476 } while (cnt-- > 0); 9477 9478 if (cnt <= 0) { 9479 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", 9480 pend_bits); 9481 return -EBUSY; 9482 } 9483 9484 return 0; 9485 } 9486 9487 static int bnx2x_process_kill(struct bnx2x *bp, bool global) 9488 { 9489 int cnt = 1000; 9490 u32 val = 0; 9491 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 9492 u32 tags_63_32 = 0; 9493 9494 /* Empty the Tetris buffer, wait for 1s */ 9495 do { 9496 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); 9497 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); 9498 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); 9499 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); 9500 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); 9501 if (CHIP_IS_E3(bp)) 9502 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); 9503 9504 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 9505 ((port_is_idle_0 & 0x1) == 0x1) && 9506 ((port_is_idle_1 & 0x1) == 0x1) && 9507 (pgl_exp_rom2 == 0xffffffff) && 9508 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) 9509 break; 9510 usleep_range(1000, 2000); 9511 } while (cnt-- > 0); 9512 9513 if (cnt <= 0) { 9514 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); 9515 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 9516 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 9517 pgl_exp_rom2); 9518 return -EAGAIN; 9519 } 9520 9521 barrier(); 9522 9523 /* Close gates #2, #3 and #4 */ 9524 bnx2x_set_234_gates(bp, true); 9525 9526 /* Poll for IGU VQs for 57712 and newer chips */ 9527 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) 9528 return -EAGAIN; 9529 9530 /* TBD: Indicate that "process kill" is in progress to MCP */ 9531 9532 /* Clear "unprepared" bit */ 9533 REG_WR(bp, MISC_REG_UNPREPARED, 0); 9534 barrier(); 9535 9536 /* Make sure all is written to the chip before the reset */ 9537 mmiowb(); 9538 9539 /* Wait for 1ms to empty GLUE and PCI-E core queues, 9540 * PSWHST, GRC and PSWRD Tetris buffer. 
9541 */ 9542 usleep_range(1000, 2000); 9543 9544 /* Prepare to chip reset: */ 9545 /* MCP */ 9546 if (global) 9547 bnx2x_reset_mcp_prep(bp, &val); 9548 9549 /* PXP */ 9550 bnx2x_pxp_prep(bp); 9551 barrier(); 9552 9553 /* reset the chip */ 9554 bnx2x_process_kill_chip_reset(bp, global); 9555 barrier(); 9556 9557 /* clear errors in PGB */ 9558 if (!CHIP_IS_E1x(bp)) 9559 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 9560 9561 /* Recover after reset: */ 9562 /* MCP */ 9563 if (global && bnx2x_reset_mcp_comp(bp, val)) 9564 return -EAGAIN; 9565 9566 /* TBD: Add resetting the NO_MCP mode DB here */ 9567 9568 /* Open the gates #2, #3 and #4 */ 9569 bnx2x_set_234_gates(bp, false); 9570 9571 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a 9572 * reset state, re-enable attentions. */ 9573 9574 return 0; 9575 } 9576 9577 static int bnx2x_leader_reset(struct bnx2x *bp) 9578 { 9579 int rc = 0; 9580 bool global = bnx2x_reset_is_global(bp); 9581 u32 load_code; 9582 9583 /* if not going to reset MCP - load "fake" driver to reset HW while 9584 * driver is owner of the HW 9585 */ 9586 if (!global && !BP_NOMCP(bp)) { 9587 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 9588 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 9589 if (!load_code) { 9590 BNX2X_ERR("MCP response failure, aborting\n"); 9591 rc = -EAGAIN; 9592 goto exit_leader_reset; 9593 } 9594 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 9595 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 9596 BNX2X_ERR("MCP unexpected resp, aborting\n"); 9597 rc = -EAGAIN; 9598 goto exit_leader_reset2; 9599 } 9600 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 9601 if (!load_code) { 9602 BNX2X_ERR("MCP response failure, aborting\n"); 9603 rc = -EAGAIN; 9604 goto exit_leader_reset2; 9605 } 9606 } 9607 9608 /* Try to recover after the failure */ 9609 if (bnx2x_process_kill(bp, global)) { 9610 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n", 9611 BP_PATH(bp)); 9612 rc = -EAGAIN; 9613 goto exit_leader_reset2; 9614 } 9615 9616 /* 9617 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver 9618 * state. 9619 */ 9620 bnx2x_set_reset_done(bp); 9621 if (global) 9622 bnx2x_clear_reset_global(bp); 9623 9624 exit_leader_reset2: 9625 /* unload "fake driver" if it was loaded */ 9626 if (!global && !BP_NOMCP(bp)) { 9627 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 9628 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 9629 } 9630 exit_leader_reset: 9631 bp->is_leader = 0; 9632 bnx2x_release_leader_lock(bp); 9633 smp_mb(); 9634 return rc; 9635 } 9636 9637 static void bnx2x_recovery_failed(struct bnx2x *bp) 9638 { 9639 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); 9640 9641 /* Disconnect this device */ 9642 netif_device_detach(bp->dev); 9643 9644 /* 9645 * Block ifup for all function on this engine until "process kill" 9646 * or power cycle. 9647 */ 9648 bnx2x_set_reset_in_progress(bp); 9649 9650 /* Shut down the power */ 9651 bnx2x_set_power_state(bp, PCI_D3hot); 9652 9653 bp->recovery_state = BNX2X_RECOVERY_FAILED; 9654 9655 smp_mb(); 9656 } 9657 9658 /* 9659 * Assumption: runs under rtnl lock. This together with the fact 9660 * that it's called only from bnx2x_sp_rtnl() ensure that it 9661 * will never be called when netif_running(bp->dev) is false. 
9662 */ 9663 static void bnx2x_parity_recover(struct bnx2x *bp) 9664 { 9665 bool global = false; 9666 u32 error_recovered, error_unrecovered; 9667 bool is_parity; 9668 9669 DP(NETIF_MSG_HW, "Handling parity\n"); 9670 while (1) { 9671 switch (bp->recovery_state) { 9672 case BNX2X_RECOVERY_INIT: 9673 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); 9674 is_parity = bnx2x_chk_parity_attn(bp, &global, false); 9675 WARN_ON(!is_parity); 9676 9677 /* Try to get a LEADER_LOCK HW lock */ 9678 if (bnx2x_trylock_leader_lock(bp)) { 9679 bnx2x_set_reset_in_progress(bp); 9680 /* 9681 * Check if there is a global attention and if 9682 * there was a global attention, set the global 9683 * reset bit. 9684 */ 9685 9686 if (global) 9687 bnx2x_set_reset_global(bp); 9688 9689 bp->is_leader = 1; 9690 } 9691 9692 /* Stop the driver */ 9693 /* If interface has been removed - break */ 9694 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) 9695 return; 9696 9697 bp->recovery_state = BNX2X_RECOVERY_WAIT; 9698 9699 /* Ensure "is_leader", MCP command sequence and 9700 * "recovery_state" update values are seen on other 9701 * CPUs. 9702 */ 9703 smp_mb(); 9704 break; 9705 9706 case BNX2X_RECOVERY_WAIT: 9707 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); 9708 if (bp->is_leader) { 9709 int other_engine = BP_PATH(bp) ? 0 : 1; 9710 bool other_load_status = 9711 bnx2x_get_load_status(bp, other_engine); 9712 bool load_status = 9713 bnx2x_get_load_status(bp, BP_PATH(bp)); 9714 global = bnx2x_reset_is_global(bp); 9715 9716 /* 9717 * In case of a parity in a global block, let 9718 * the first leader that performs a 9719 * leader_reset() reset the global blocks in 9720 * order to clear global attentions. Otherwise 9721 * the gates will remain closed for that 9722 * engine. 9723 */ 9724 if (load_status || 9725 (global && other_load_status)) { 9726 /* Wait until all other functions get 9727 * down. 9728 */ 9729 schedule_delayed_work(&bp->sp_rtnl_task, 9730 HZ/10); 9731 return; 9732 } else { 9733 /* If all other functions got down - 9734 * try to bring the chip back to 9735 * normal. In any case it's an exit 9736 * point for a leader. 9737 */ 9738 if (bnx2x_leader_reset(bp)) { 9739 bnx2x_recovery_failed(bp); 9740 return; 9741 } 9742 9743 /* If we are here, means that the 9744 * leader has succeeded and doesn't 9745 * want to be a leader any more. Try 9746 * to continue as a none-leader. 9747 */ 9748 break; 9749 } 9750 } else { /* non-leader */ 9751 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { 9752 /* Try to get a LEADER_LOCK HW lock as 9753 * long as a former leader may have 9754 * been unloaded by the user or 9755 * released a leadership by another 9756 * reason. 9757 */ 9758 if (bnx2x_trylock_leader_lock(bp)) { 9759 /* I'm a leader now! Restart a 9760 * switch case. 9761 */ 9762 bp->is_leader = 1; 9763 break; 9764 } 9765 9766 schedule_delayed_work(&bp->sp_rtnl_task, 9767 HZ/10); 9768 return; 9769 9770 } else { 9771 /* 9772 * If there was a global attention, wait 9773 * for it to be cleared. 9774 */ 9775 if (bnx2x_reset_is_global(bp)) { 9776 schedule_delayed_work( 9777 &bp->sp_rtnl_task, 9778 HZ/10); 9779 return; 9780 } 9781 9782 error_recovered = 9783 bp->eth_stats.recoverable_error; 9784 error_unrecovered = 9785 bp->eth_stats.unrecoverable_error; 9786 bp->recovery_state = 9787 BNX2X_RECOVERY_NIC_LOADING; 9788 if (bnx2x_nic_load(bp, LOAD_NORMAL)) { 9789 error_unrecovered++; 9790 netdev_err(bp->dev, 9791 "Recovery failed. 
Power cycle needed\n"); 9792 /* Disconnect this device */ 9793 netif_device_detach(bp->dev); 9794 /* Shut down the power */ 9795 bnx2x_set_power_state( 9796 bp, PCI_D3hot); 9797 smp_mb(); 9798 } else { 9799 bp->recovery_state = 9800 BNX2X_RECOVERY_DONE; 9801 error_recovered++; 9802 smp_mb(); 9803 } 9804 bp->eth_stats.recoverable_error = 9805 error_recovered; 9806 bp->eth_stats.unrecoverable_error = 9807 error_unrecovered; 9808 9809 return; 9810 } 9811 } 9812 default: 9813 return; 9814 } 9815 } 9816 } 9817 9818 static int bnx2x_close(struct net_device *dev); 9819 9820 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is 9821 * scheduled on a general queue in order to prevent a dead lock. 9822 */ 9823 static void bnx2x_sp_rtnl_task(struct work_struct *work) 9824 { 9825 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); 9826 9827 rtnl_lock(); 9828 9829 if (!netif_running(bp->dev)) { 9830 rtnl_unlock(); 9831 return; 9832 } 9833 9834 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { 9835 #ifdef BNX2X_STOP_ON_ERROR 9836 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 9837 "you will need to reboot when done\n"); 9838 goto sp_rtnl_not_reset; 9839 #endif 9840 /* 9841 * Clear all pending SP commands as we are going to reset the 9842 * function anyway. 9843 */ 9844 bp->sp_rtnl_state = 0; 9845 smp_mb(); 9846 9847 bnx2x_parity_recover(bp); 9848 9849 rtnl_unlock(); 9850 return; 9851 } 9852 9853 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { 9854 #ifdef BNX2X_STOP_ON_ERROR 9855 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 9856 "you will need to reboot when done\n"); 9857 goto sp_rtnl_not_reset; 9858 #endif 9859 9860 /* 9861 * Clear all pending SP commands as we are going to reset the 9862 * function anyway. 9863 */ 9864 bp->sp_rtnl_state = 0; 9865 smp_mb(); 9866 9867 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 9868 bnx2x_nic_load(bp, LOAD_NORMAL); 9869 9870 rtnl_unlock(); 9871 return; 9872 } 9873 #ifdef BNX2X_STOP_ON_ERROR 9874 sp_rtnl_not_reset: 9875 #endif 9876 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 9877 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); 9878 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) 9879 bnx2x_after_function_update(bp); 9880 /* 9881 * in case of fan failure we need to reset id if the "stop on error" 9882 * debug flag is set, since we trying to prevent permanent overheating 9883 * damage 9884 */ 9885 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { 9886 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); 9887 netif_device_detach(bp->dev); 9888 bnx2x_close(bp->dev); 9889 rtnl_unlock(); 9890 return; 9891 } 9892 9893 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { 9894 DP(BNX2X_MSG_SP, 9895 "sending set mcast vf pf channel message from rtnl sp-task\n"); 9896 bnx2x_vfpf_set_mcast(bp->dev); 9897 } 9898 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 9899 &bp->sp_rtnl_state)){ 9900 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { 9901 bnx2x_tx_disable(bp); 9902 BNX2X_ERR("PF indicated channel is not servicable anymore. 
This means this VF device is no longer operational\n"); 9903 } 9904 } 9905 9906 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { 9907 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); 9908 bnx2x_set_rx_mode_inner(bp); 9909 } 9910 9911 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, 9912 &bp->sp_rtnl_state)) 9913 bnx2x_pf_set_vfs_vlan(bp); 9914 9915 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { 9916 bnx2x_dcbx_stop_hw_tx(bp); 9917 bnx2x_dcbx_resume_hw_tx(bp); 9918 } 9919 9920 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, 9921 &bp->sp_rtnl_state)) 9922 bnx2x_update_mng_version(bp); 9923 9924 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9925 * can be called from other contexts as well) 9926 */ 9927 rtnl_unlock(); 9928 9929 /* enable SR-IOV if applicable */ 9930 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, 9931 &bp->sp_rtnl_state)) { 9932 bnx2x_disable_sriov(bp); 9933 bnx2x_enable_sriov(bp); 9934 } 9935 } 9936 9937 static void bnx2x_period_task(struct work_struct *work) 9938 { 9939 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); 9940 9941 if (!netif_running(bp->dev)) 9942 goto period_task_exit; 9943 9944 if (CHIP_REV_IS_SLOW(bp)) { 9945 BNX2X_ERR("period task called on emulation, ignoring\n"); 9946 goto period_task_exit; 9947 } 9948 9949 bnx2x_acquire_phy_lock(bp); 9950 /* 9951 * The barrier is needed to ensure the ordering between the writing to 9952 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 9953 * the reading here. 9954 */ 9955 smp_mb(); 9956 if (bp->port.pmf) { 9957 bnx2x_period_func(&bp->link_params, &bp->link_vars); 9958 9959 /* Re-queue task in 1 sec */ 9960 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); 9961 } 9962 9963 bnx2x_release_phy_lock(bp); 9964 period_task_exit: 9965 return; 9966 } 9967 9968 /* 9969 * Init service functions 9970 */ 9971 9972 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) 9973 { 9974 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; 9975 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; 9976 return base + (BP_ABS_FUNC(bp)) * stride; 9977 } 9978 9979 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, 9980 struct bnx2x_mac_vals *vals) 9981 { 9982 u32 val, base_addr, offset, mask, reset_reg; 9983 bool mac_stopped = false; 9984 u8 port = BP_PORT(bp); 9985 9986 /* reset addresses as they also mark which values were changed */ 9987 vals->bmac_addr = 0; 9988 vals->umac_addr = 0; 9989 vals->xmac_addr = 0; 9990 vals->emac_addr = 0; 9991 9992 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 9993 9994 if (!CHIP_IS_E3(bp)) { 9995 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 9996 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 9997 if ((mask & reset_reg) && val) { 9998 u32 wb_data[2]; 9999 BNX2X_DEV_INFO("Disable bmac Rx\n"); 10000 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM 10001 : NIG_REG_INGRESS_BMAC0_MEM; 10002 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL 10003 : BIGMAC_REGISTER_BMAC_CONTROL; 10004 10005 /* 10006 * use rd/wr since we cannot use dmae. This is safe 10007 * since MCP won't access the bus due to the request 10008 * to unload, and no function on the path can be 10009 * loaded at this time. 
10010 */ 10011 wb_data[0] = REG_RD(bp, base_addr + offset); 10012 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); 10013 vals->bmac_addr = base_addr + offset; 10014 vals->bmac_val[0] = wb_data[0]; 10015 vals->bmac_val[1] = wb_data[1]; 10016 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 10017 REG_WR(bp, vals->bmac_addr, wb_data[0]); 10018 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); 10019 } 10020 BNX2X_DEV_INFO("Disable emac Rx\n"); 10021 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; 10022 vals->emac_val = REG_RD(bp, vals->emac_addr); 10023 REG_WR(bp, vals->emac_addr, 0); 10024 mac_stopped = true; 10025 } else { 10026 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 10027 BNX2X_DEV_INFO("Disable xmac Rx\n"); 10028 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 10029 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); 10030 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 10031 val & ~(1 << 1)); 10032 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 10033 val | (1 << 1)); 10034 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 10035 vals->xmac_val = REG_RD(bp, vals->xmac_addr); 10036 REG_WR(bp, vals->xmac_addr, 0); 10037 mac_stopped = true; 10038 } 10039 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 10040 if (mask & reset_reg) { 10041 BNX2X_DEV_INFO("Disable umac Rx\n"); 10042 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 10043 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 10044 vals->umac_val = REG_RD(bp, vals->umac_addr); 10045 REG_WR(bp, vals->umac_addr, 0); 10046 mac_stopped = true; 10047 } 10048 } 10049 10050 if (mac_stopped) 10051 msleep(20); 10052 } 10053 10054 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 10055 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 10056 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 10057 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 10058 10059 #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10060 #define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10061 #define BCM_5710_UNDI_FW_MF_VERS (0x05) 10062 #define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) 10063 #define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) 10064 10065 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) 10066 { 10067 /* UNDI marks its presence in DORQ - 10068 * it initializes CID offset for normal bell to 0x7 10069 */ 10070 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & 10071 MISC_REGISTERS_RESET_REG_1_RST_DORQ)) 10072 return false; 10073 10074 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { 10075 BNX2X_DEV_INFO("UNDI previously loaded\n"); 10076 return true; 10077 } 10078 10079 return false; 10080 } 10081 10082 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10083 { 10084 u8 major, minor, version; 10085 u32 fw; 10086 10087 /* Must check that FW is loaded */ 10088 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & 10089 MISC_REGISTERS_RESET_REG_1_RST_XSEM)) { 10090 BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n"); 10091 return false; 10092 } 10093 10094 /* Read Currently loaded FW version */ 10095 fw = REG_RD(bp, XSEM_REG_PRAM); 10096 major = fw & 0xff; 10097 minor = (fw >> 0x8) & 0xff; 10098 version = (fw >> 0x10) & 0xff; 10099 BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n", 10100 fw, major, minor, version); 10101 10102 if (major > BCM_5710_UNDI_FW_MF_MAJOR) 10103 return true; 10104 10105 if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && 10106 (minor > BCM_5710_UNDI_FW_MF_MINOR)) 10107 return true; 10108 10109 if ((major == 
BCM_5710_UNDI_FW_MF_MAJOR) && 10110 (minor == BCM_5710_UNDI_FW_MF_MINOR) && 10111 (version >= BCM_5710_UNDI_FW_MF_VERS)) 10112 return true; 10113 10114 return false; 10115 } 10116 10117 static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp) 10118 { 10119 int i; 10120 10121 /* Due to legacy (FW) code, the first function on each engine has a 10122 * different offset macro from the rest of the functions. 10123 * Setting this for all 8 functions is harmless regardless of whether 10124 * this is actually a multi-function device. 10125 */ 10126 for (i = 0; i < 2; i++) 10127 REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); 10128 10129 for (i = 2; i < 8; i++) 10130 REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); 10131 10132 BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); 10133 } 10134 10135 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) 10136 { 10137 u16 rcq, bd; 10138 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); 10139 10140 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 10141 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 10142 10143 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 10144 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); 10145 10146 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 10147 port, bd, rcq); 10148 } 10149 10150 static int bnx2x_prev_mcp_done(struct bnx2x *bp) 10151 { 10152 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 10153 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 10154 if (!rc) { 10155 BNX2X_ERR("MCP response failure, aborting\n"); 10156 return -EBUSY; 10157 } 10158 10159 return 0; 10160 } 10161 10162 static struct bnx2x_prev_path_list * 10163 bnx2x_prev_path_get_entry(struct bnx2x *bp) 10164 { 10165 struct bnx2x_prev_path_list *tmp_list; 10166 10167 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) 10168 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 10169 bp->pdev->bus->number == tmp_list->bus && 10170 BP_PATH(bp) == tmp_list->path) 10171 return tmp_list; 10172 10173 return NULL; 10174 } 10175 10176 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) 10177 { 10178 struct bnx2x_prev_path_list *tmp_list; 10179 int rc; 10180 10181 rc = down_interruptible(&bnx2x_prev_sem); 10182 if (rc) { 10183 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10184 return rc; 10185 } 10186 10187 tmp_list = bnx2x_prev_path_get_entry(bp); 10188 if (tmp_list) { 10189 tmp_list->aer = 1; 10190 rc = 0; 10191 } else { 10192 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", 10193 BP_PATH(bp)); 10194 } 10195 10196 up(&bnx2x_prev_sem); 10197 10198 return rc; 10199 } 10200 10201 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) 10202 { 10203 struct bnx2x_prev_path_list *tmp_list; 10204 bool rc = false; 10205 10206 if (down_trylock(&bnx2x_prev_sem)) 10207 return false; 10208 10209 tmp_list = bnx2x_prev_path_get_entry(bp); 10210 if (tmp_list) { 10211 if (tmp_list->aer) { 10212 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", 10213 BP_PATH(bp)); 10214 } else { 10215 rc = true; 10216 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 10217 BP_PATH(bp)); 10218 } 10219 } 10220 10221 up(&bnx2x_prev_sem); 10222 10223 return rc; 10224 } 10225 10226 bool bnx2x_port_after_undi(struct bnx2x *bp) 10227 { 10228 struct bnx2x_prev_path_list *entry; 10229 bool val; 10230 10231 down(&bnx2x_prev_sem); 10232 10233 entry = bnx2x_prev_path_get_entry(bp); 10234 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); 10235 10236 up(&bnx2x_prev_sem); 10237 10238 return val; 10239 } 
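/**
 * bnx2x_prev_mark_path - note that the previous-unload flow finished for this path.
 *
 * @bp:		driver handle
 * @after_undi:	whether an UNDI driver was found on this port
 *
 * Records the device's bus/slot/path in bnx2x_prev_list (under bnx2x_prev_sem)
 * so that later probes on the same path can skip the cleanup; if an entry
 * already exists, only its AER indication is cleared. Returns 0 on success
 * or a negative errno.
 */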
10240 10241 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) 10242 { 10243 struct bnx2x_prev_path_list *tmp_list; 10244 int rc; 10245 10246 rc = down_interruptible(&bnx2x_prev_sem); 10247 if (rc) { 10248 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10249 return rc; 10250 } 10251 10252 /* Check whether the entry for this path already exists */ 10253 tmp_list = bnx2x_prev_path_get_entry(bp); 10254 if (tmp_list) { 10255 if (!tmp_list->aer) { 10256 BNX2X_ERR("Re-Marking the path.\n"); 10257 } else { 10258 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", 10259 BP_PATH(bp)); 10260 tmp_list->aer = 0; 10261 } 10262 up(&bnx2x_prev_sem); 10263 return 0; 10264 } 10265 up(&bnx2x_prev_sem); 10266 10267 /* Create an entry for this path and add it */ 10268 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 10269 if (!tmp_list) { 10270 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); 10271 return -ENOMEM; 10272 } 10273 10274 tmp_list->bus = bp->pdev->bus->number; 10275 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 10276 tmp_list->path = BP_PATH(bp); 10277 tmp_list->aer = 0; 10278 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; 10279 10280 rc = down_interruptible(&bnx2x_prev_sem); 10281 if (rc) { 10282 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10283 kfree(tmp_list); 10284 } else { 10285 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", 10286 BP_PATH(bp)); 10287 list_add(&tmp_list->list, &bnx2x_prev_list); 10288 up(&bnx2x_prev_sem); 10289 } 10290 10291 return rc; 10292 } 10293 10294 static int bnx2x_do_flr(struct bnx2x *bp) 10295 { 10296 struct pci_dev *dev = bp->pdev; 10297 10298 if (CHIP_IS_E1x(bp)) { 10299 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); 10300 return -EINVAL; 10301 } 10302 10303 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 10304 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 10305 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", 10306 bp->common.bc_ver); 10307 return -EINVAL; 10308 } 10309 10310 if (!pci_wait_for_pending_transaction(dev)) 10311 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); 10312 10313 BNX2X_DEV_INFO("Initiating FLR\n"); 10314 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 10315 10316 return 0; 10317 } 10318 10319 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) 10320 { 10321 int rc; 10322 10323 BNX2X_DEV_INFO("Uncommon unload Flow\n"); 10324 10325 /* Test if previous unload process was already finished for this path */ 10326 if (bnx2x_prev_is_path_marked(bp)) 10327 return bnx2x_prev_mcp_done(bp); 10328 10329 BNX2X_DEV_INFO("Path is unmarked\n"); 10330 10331 /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */ 10332 if (bnx2x_prev_is_after_undi(bp)) 10333 goto out; 10334 10335 /* If function has FLR capabilities, and existing FW version matches 10336 * the one required, then FLR will be sufficient to clean any residue 10337 * left by previous driver 10338 */ 10339 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false); 10340 10341 if (!rc) { 10342 /* fw version is good */ 10343 BNX2X_DEV_INFO("FW version matches our own. 
Attempting FLR\n"); 10344 rc = bnx2x_do_flr(bp); 10345 } 10346 10347 if (!rc) { 10348 /* FLR was performed */ 10349 BNX2X_DEV_INFO("FLR successful\n"); 10350 return 0; 10351 } 10352 10353 BNX2X_DEV_INFO("Could not FLR\n"); 10354 10355 out: 10356 /* Close the MCP request, return failure*/ 10357 rc = bnx2x_prev_mcp_done(bp); 10358 if (!rc) 10359 rc = BNX2X_PREV_WAIT_NEEDED; 10360 10361 return rc; 10362 } 10363 10364 static int bnx2x_prev_unload_common(struct bnx2x *bp) 10365 { 10366 u32 reset_reg, tmp_reg = 0, rc; 10367 bool prev_undi = false; 10368 struct bnx2x_mac_vals mac_vals; 10369 10370 /* It is possible a previous function received 'common' answer, 10371 * but hasn't loaded yet, therefore creating a scenario of 10372 * multiple functions receiving 'common' on the same path. 10373 */ 10374 BNX2X_DEV_INFO("Common unload Flow\n"); 10375 10376 memset(&mac_vals, 0, sizeof(mac_vals)); 10377 10378 if (bnx2x_prev_is_path_marked(bp)) 10379 return bnx2x_prev_mcp_done(bp); 10380 10381 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); 10382 10383 /* Reset should be performed after BRB is emptied */ 10384 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10385 u32 timer_count = 1000; 10386 bool need_write = true; 10387 10388 /* Close the MAC Rx to prevent BRB from filling up */ 10389 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10390 10391 /* close LLH filters towards the BRB */ 10392 bnx2x_set_rx_filter(&bp->link_params, 0); 10393 10394 /* Check if the UNDI driver was previously loaded */ 10395 if (bnx2x_prev_is_after_undi(bp)) { 10396 prev_undi = true; 10397 /* clear the UNDI indication */ 10398 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); 10399 /* clear possible idle check errors */ 10400 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); 10401 } 10402 if (!CHIP_IS_E1x(bp)) 10403 /* block FW from writing to host */ 10404 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 10405 10406 /* wait until BRB is empty */ 10407 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 10408 while (timer_count) { 10409 u32 prev_brb = tmp_reg; 10410 10411 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 10412 if (!tmp_reg) 10413 break; 10414 10415 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); 10416 10417 /* reset timer as long as BRB actually gets emptied */ 10418 if (prev_brb > tmp_reg) 10419 timer_count = 1000; 10420 else 10421 timer_count--; 10422 10423 /* New UNDI FW supports MF and contains better 10424 * cleaning methods - might be redundant but harmless. 
10425 */ 10426 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { 10427 if (need_write) { 10428 bnx2x_prev_unload_undi_mf(bp); 10429 need_write = false; 10430 } 10431 } else if (prev_undi) { 10432 /* If UNDI resides in memory, 10433 * manually increment it 10434 */ 10435 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); 10436 } 10437 udelay(10); 10438 } 10439 10440 if (!timer_count) 10441 BNX2X_ERR("Failed to empty BRB, hope for the best\n"); 10442 } 10443 10444 /* No packets are in the pipeline, path is ready for reset */ 10445 bnx2x_reset_common(bp); 10446 10447 if (mac_vals.xmac_addr) 10448 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); 10449 if (mac_vals.umac_addr) 10450 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val); 10451 if (mac_vals.emac_addr) 10452 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); 10453 if (mac_vals.bmac_addr) { 10454 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 10455 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 10456 } 10457 10458 rc = bnx2x_prev_mark_path(bp, prev_undi); 10459 if (rc) { 10460 bnx2x_prev_mcp_done(bp); 10461 return rc; 10462 } 10463 10464 return bnx2x_prev_mcp_done(bp); 10465 } 10466 10467 /* previous driver DMAE transaction may have occurred when pre-boot stage ended 10468 * and boot began, or when kdump kernel was loaded. Either case would invalidate 10469 * the addresses of the transaction, resulting in was-error bit set in the pci 10470 * causing all hw-to-host pcie transactions to timeout. If this happened we want 10471 * to clear the interrupt which detected this from the pglueb and the was done 10472 * bit 10473 */ 10474 static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp) 10475 { 10476 if (!CHIP_IS_E1x(bp)) { 10477 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); 10478 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 10479 DP(BNX2X_MSG_SP, 10480 "'was error' bit was found to be set in pglueb upon startup. Clearing\n"); 10481 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 10482 1 << BP_FUNC(bp)); 10483 } 10484 } 10485 } 10486 10487 static int bnx2x_prev_unload(struct bnx2x *bp) 10488 { 10489 int time_counter = 10; 10490 u32 rc, fw, hw_lock_reg, hw_lock_val; 10491 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 10492 10493 /* clear hw from errors which may have resulted from an interrupted 10494 * dmae transaction. 10495 */ 10496 bnx2x_prev_interrupted_dmae(bp); 10497 10498 /* Release previously held locks */ 10499 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 
10500 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 10501 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 10502 10503 hw_lock_val = REG_RD(bp, hw_lock_reg); 10504 if (hw_lock_val) { 10505 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 10506 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); 10507 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 10508 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); 10509 } 10510 10511 BNX2X_DEV_INFO("Release Previously held hw lock\n"); 10512 REG_WR(bp, hw_lock_reg, 0xffffffff); 10513 } else 10514 BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); 10515 10516 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { 10517 BNX2X_DEV_INFO("Release previously held alr\n"); 10518 bnx2x_release_alr(bp); 10519 } 10520 10521 do { 10522 int aer = 0; 10523 /* Lock MCP using an unload request */ 10524 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 10525 if (!fw) { 10526 BNX2X_ERR("MCP response failure, aborting\n"); 10527 rc = -EBUSY; 10528 break; 10529 } 10530 10531 rc = down_interruptible(&bnx2x_prev_sem); 10532 if (rc) { 10533 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", 10534 rc); 10535 } else { 10536 /* If Path is marked by EEH, ignore unload status */ 10537 aer = !!(bnx2x_prev_path_get_entry(bp) && 10538 bnx2x_prev_path_get_entry(bp)->aer); 10539 up(&bnx2x_prev_sem); 10540 } 10541 10542 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { 10543 rc = bnx2x_prev_unload_common(bp); 10544 break; 10545 } 10546 10547 /* non-common reply from MCP might require looping */ 10548 rc = bnx2x_prev_unload_uncommon(bp); 10549 if (rc != BNX2X_PREV_WAIT_NEEDED) 10550 break; 10551 10552 msleep(20); 10553 } while (--time_counter); 10554 10555 if (!time_counter || rc) { 10556 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n"); 10557 rc = -EPROBE_DEFER; 10558 } 10559 10560 /* Mark function if its port was used to boot from SAN */ 10561 if (bnx2x_port_after_undi(bp)) 10562 bp->link_params.feature_config_flags |= 10563 FEATURE_CONFIG_BOOT_FROM_SAN; 10564 10565 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 10566 10567 return rc; 10568 } 10569 10570 static void bnx2x_get_common_hwinfo(struct bnx2x *bp) 10571 { 10572 u32 val, val2, val3, val4, id, boot_mode; 10573 u16 pmc; 10574 10575 /* Get the chip revision id and number. 
*/
*/ 10576 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 10577 val = REG_RD(bp, MISC_REG_CHIP_NUM); 10578 id = ((val & 0xffff) << 16); 10579 val = REG_RD(bp, MISC_REG_CHIP_REV); 10580 id |= ((val & 0xf) << 12); 10581 10582 /* Metal is read from PCI regs, but we can't access >=0x400 from 10583 * the configuration space (so we need to reg_rd) 10584 */ 10585 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); 10586 id |= (((val >> 24) & 0xf) << 4); 10587 val = REG_RD(bp, MISC_REG_BOND_ID); 10588 id |= (val & 0xf); 10589 bp->common.chip_id = id; 10590 10591 /* force 57811 according to MISC register */ 10592 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 10593 if (CHIP_IS_57810(bp)) 10594 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 10595 (bp->common.chip_id & 0x0000FFFF); 10596 else if (CHIP_IS_57810_MF(bp)) 10597 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 10598 (bp->common.chip_id & 0x0000FFFF); 10599 bp->common.chip_id |= 0x1; 10600 } 10601 10602 /* Set doorbell size */ 10603 bp->db_size = (1 << BNX2X_DB_SHIFT); 10604 10605 if (!CHIP_IS_E1x(bp)) { 10606 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 10607 if ((val & 1) == 0) 10608 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 10609 else 10610 val = (val >> 1) & 1; 10611 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 10612 "2_PORT_MODE"); 10613 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : 10614 CHIP_2_PORT_MODE; 10615 10616 if (CHIP_MODE_IS_4_PORT(bp)) 10617 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 10618 else 10619 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 10620 } else { 10621 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 10622 bp->pfid = bp->pf_num; /* 0..7 */ 10623 } 10624 10625 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); 10626 10627 bp->link_params.chip_id = bp->common.chip_id; 10628 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 10629 10630 val = (REG_RD(bp, 0x2874) & 0x55); 10631 if ((bp->common.chip_id & 0x1) || 10632 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 10633 bp->flags |= ONE_PORT_FLAG; 10634 BNX2X_DEV_INFO("single port device\n"); 10635 } 10636 10637 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 10638 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 10639 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 10640 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 10641 bp->common.flash_size, bp->common.flash_size); 10642 10643 bnx2x_init_shmem(bp); 10644 10645 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
10646 MISC_REG_GENERIC_CR_1 : 10647 MISC_REG_GENERIC_CR_0)); 10648 10649 bp->link_params.shmem_base = bp->common.shmem_base; 10650 bp->link_params.shmem2_base = bp->common.shmem2_base; 10651 if (SHMEM2_RD(bp, size) > 10652 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 10653 bp->link_params.lfa_base = 10654 REG_RD(bp, bp->common.shmem2_base + 10655 (u32)offsetof(struct shmem2_region, 10656 lfa_host_addr[BP_PORT(bp)])); 10657 else 10658 bp->link_params.lfa_base = 0; 10659 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 10660 bp->common.shmem_base, bp->common.shmem2_base); 10661 10662 if (!bp->common.shmem_base) { 10663 BNX2X_DEV_INFO("MCP not active\n"); 10664 bp->flags |= NO_MCP_FLAG; 10665 return; 10666 } 10667 10668 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 10669 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 10670 10671 bp->link_params.hw_led_mode = ((bp->common.hw_config & 10672 SHARED_HW_CFG_LED_MODE_MASK) >> 10673 SHARED_HW_CFG_LED_MODE_SHIFT); 10674 10675 bp->link_params.feature_config_flags = 0; 10676 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 10677 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 10678 bp->link_params.feature_config_flags |= 10679 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 10680 else 10681 bp->link_params.feature_config_flags &= 10682 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 10683 10684 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 10685 bp->common.bc_ver = val; 10686 BNX2X_DEV_INFO("bc_ver %X\n", val); 10687 if (val < BNX2X_BC_VER) { 10688 /* for now only warn 10689 * later we might need to enforce this */ 10690 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 10691 BNX2X_BC_VER, val); 10692 } 10693 bp->link_params.feature_config_flags |= 10694 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 10695 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 10696 10697 bp->link_params.feature_config_flags |= 10698 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 10699 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 10700 bp->link_params.feature_config_flags |= 10701 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 10702 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 10703 bp->link_params.feature_config_flags |= 10704 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 10705 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 10706 10707 bp->link_params.feature_config_flags |= 10708 (val >= REQ_BC_VER_4_MT_SUPPORTED) ? 10709 FEATURE_CONFIG_MT_SUPPORT : 0; 10710 10711 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 10712 BC_SUPPORTS_PFC_STATS : 0; 10713 10714 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? 10715 BC_SUPPORTS_FCOE_FEATURES : 0; 10716 10717 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 10718 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; 10719 10720 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 
10721 BC_SUPPORTS_RMMOD_CMD : 0; 10722 10723 boot_mode = SHMEM_RD(bp, 10724 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 10725 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 10726 switch (boot_mode) { 10727 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: 10728 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; 10729 break; 10730 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: 10731 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; 10732 break; 10733 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: 10734 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; 10735 break; 10736 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: 10737 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; 10738 break; 10739 } 10740 10741 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); 10742 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 10743 10744 BNX2X_DEV_INFO("%sWoL capable\n", 10745 (bp->flags & NO_WOL_FLAG) ? "not " : ""); 10746 10747 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 10748 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 10749 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 10750 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 10751 10752 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", 10753 val, val2, val3, val4); 10754 } 10755 10756 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 10757 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 10758 10759 static int bnx2x_get_igu_cam_info(struct bnx2x *bp) 10760 { 10761 int pfid = BP_FUNC(bp); 10762 int igu_sb_id; 10763 u32 val; 10764 u8 fid, igu_sb_cnt = 0; 10765 10766 bp->igu_base_sb = 0xff; 10767 if (CHIP_INT_MODE_IS_BC(bp)) { 10768 int vn = BP_VN(bp); 10769 igu_sb_cnt = bp->igu_sb_cnt; 10770 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 10771 FP_SB_MAX_E1x; 10772 10773 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + 10774 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); 10775 10776 return 0; 10777 } 10778 10779 /* IGU in normal mode - read CAM */ 10780 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 10781 igu_sb_id++) { 10782 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 10783 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 10784 continue; 10785 fid = IGU_FID(val); 10786 if ((fid & IGU_FID_ENCODE_IS_PF)) { 10787 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) 10788 continue; 10789 if (IGU_VEC(val) == 0) 10790 /* default status block */ 10791 bp->igu_dsb_id = igu_sb_id; 10792 else { 10793 if (bp->igu_base_sb == 0xff) 10794 bp->igu_base_sb = igu_sb_id; 10795 igu_sb_cnt++; 10796 } 10797 } 10798 } 10799 10800 #ifdef CONFIG_PCI_MSI 10801 /* Due to new PF resource allocation by MFW T7.4 and above, it's 10802 * optional that number of CAM entries will not be equal to the value 10803 * advertised in PCI. 
10804 * Driver should use the minimal value of both as the actual status 10805 * block count 10806 */ 10807 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); 10808 #endif 10809 10810 if (igu_sb_cnt == 0) { 10811 BNX2X_ERR("CAM configuration error\n"); 10812 return -EINVAL; 10813 } 10814 10815 return 0; 10816 } 10817 10818 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) 10819 { 10820 int cfg_size = 0, idx, port = BP_PORT(bp); 10821 10822 /* Aggregation of supported attributes of all external phys */ 10823 bp->port.supported[0] = 0; 10824 bp->port.supported[1] = 0; 10825 switch (bp->link_params.num_phys) { 10826 case 1: 10827 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; 10828 cfg_size = 1; 10829 break; 10830 case 2: 10831 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; 10832 cfg_size = 1; 10833 break; 10834 case 3: 10835 if (bp->link_params.multi_phy_config & 10836 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 10837 bp->port.supported[1] = 10838 bp->link_params.phy[EXT_PHY1].supported; 10839 bp->port.supported[0] = 10840 bp->link_params.phy[EXT_PHY2].supported; 10841 } else { 10842 bp->port.supported[0] = 10843 bp->link_params.phy[EXT_PHY1].supported; 10844 bp->port.supported[1] = 10845 bp->link_params.phy[EXT_PHY2].supported; 10846 } 10847 cfg_size = 2; 10848 break; 10849 } 10850 10851 if (!(bp->port.supported[0] || bp->port.supported[1])) { 10852 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", 10853 SHMEM_RD(bp, 10854 dev_info.port_hw_config[port].external_phy_config), 10855 SHMEM_RD(bp, 10856 dev_info.port_hw_config[port].external_phy_config2)); 10857 return; 10858 } 10859 10860 if (CHIP_IS_E3(bp)) 10861 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 10862 else { 10863 switch (switch_cfg) { 10864 case SWITCH_CFG_1G: 10865 bp->port.phy_addr = REG_RD( 10866 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 10867 break; 10868 case SWITCH_CFG_10G: 10869 bp->port.phy_addr = REG_RD( 10870 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 10871 break; 10872 default: 10873 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 10874 bp->port.link_config[0]); 10875 return; 10876 } 10877 } 10878 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 10879 /* mask what we support according to speed_cap_mask per configuration */ 10880 for (idx = 0; idx < cfg_size; idx++) { 10881 if (!(bp->link_params.speed_cap_mask[idx] & 10882 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 10883 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 10884 10885 if (!(bp->link_params.speed_cap_mask[idx] & 10886 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 10887 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 10888 10889 if (!(bp->link_params.speed_cap_mask[idx] & 10890 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 10891 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 10892 10893 if (!(bp->link_params.speed_cap_mask[idx] & 10894 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 10895 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 10896 10897 if (!(bp->link_params.speed_cap_mask[idx] & 10898 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 10899 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 10900 SUPPORTED_1000baseT_Full); 10901 10902 if (!(bp->link_params.speed_cap_mask[idx] & 10903 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 10904 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 10905 10906 if (!(bp->link_params.speed_cap_mask[idx] & 10907 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 10908 bp->port.supported[idx] &= 
~SUPPORTED_10000baseT_Full; 10909 10910 if (!(bp->link_params.speed_cap_mask[idx] & 10911 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 10912 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; 10913 } 10914 10915 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 10916 bp->port.supported[1]); 10917 } 10918 10919 static void bnx2x_link_settings_requested(struct bnx2x *bp) 10920 { 10921 u32 link_config, idx, cfg_size = 0; 10922 bp->port.advertising[0] = 0; 10923 bp->port.advertising[1] = 0; 10924 switch (bp->link_params.num_phys) { 10925 case 1: 10926 case 2: 10927 cfg_size = 1; 10928 break; 10929 case 3: 10930 cfg_size = 2; 10931 break; 10932 } 10933 for (idx = 0; idx < cfg_size; idx++) { 10934 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 10935 link_config = bp->port.link_config[idx]; 10936 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 10937 case PORT_FEATURE_LINK_SPEED_AUTO: 10938 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 10939 bp->link_params.req_line_speed[idx] = 10940 SPEED_AUTO_NEG; 10941 bp->port.advertising[idx] |= 10942 bp->port.supported[idx]; 10943 if (bp->link_params.phy[EXT_PHY1].type == 10944 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 10945 bp->port.advertising[idx] |= 10946 (SUPPORTED_100baseT_Half | 10947 SUPPORTED_100baseT_Full); 10948 } else { 10949 /* force 10G, no AN */ 10950 bp->link_params.req_line_speed[idx] = 10951 SPEED_10000; 10952 bp->port.advertising[idx] |= 10953 (ADVERTISED_10000baseT_Full | 10954 ADVERTISED_FIBRE); 10955 continue; 10956 } 10957 break; 10958 10959 case PORT_FEATURE_LINK_SPEED_10M_FULL: 10960 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 10961 bp->link_params.req_line_speed[idx] = 10962 SPEED_10; 10963 bp->port.advertising[idx] |= 10964 (ADVERTISED_10baseT_Full | 10965 ADVERTISED_TP); 10966 } else { 10967 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10968 link_config, 10969 bp->link_params.speed_cap_mask[idx]); 10970 return; 10971 } 10972 break; 10973 10974 case PORT_FEATURE_LINK_SPEED_10M_HALF: 10975 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 10976 bp->link_params.req_line_speed[idx] = 10977 SPEED_10; 10978 bp->link_params.req_duplex[idx] = 10979 DUPLEX_HALF; 10980 bp->port.advertising[idx] |= 10981 (ADVERTISED_10baseT_Half | 10982 ADVERTISED_TP); 10983 } else { 10984 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10985 link_config, 10986 bp->link_params.speed_cap_mask[idx]); 10987 return; 10988 } 10989 break; 10990 10991 case PORT_FEATURE_LINK_SPEED_100M_FULL: 10992 if (bp->port.supported[idx] & 10993 SUPPORTED_100baseT_Full) { 10994 bp->link_params.req_line_speed[idx] = 10995 SPEED_100; 10996 bp->port.advertising[idx] |= 10997 (ADVERTISED_100baseT_Full | 10998 ADVERTISED_TP); 10999 } else { 11000 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11001 link_config, 11002 bp->link_params.speed_cap_mask[idx]); 11003 return; 11004 } 11005 break; 11006 11007 case PORT_FEATURE_LINK_SPEED_100M_HALF: 11008 if (bp->port.supported[idx] & 11009 SUPPORTED_100baseT_Half) { 11010 bp->link_params.req_line_speed[idx] = 11011 SPEED_100; 11012 bp->link_params.req_duplex[idx] = 11013 DUPLEX_HALF; 11014 bp->port.advertising[idx] |= 11015 (ADVERTISED_100baseT_Half | 11016 ADVERTISED_TP); 11017 } else { 11018 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11019 link_config, 11020 bp->link_params.speed_cap_mask[idx]); 11021 return; 11022 } 11023 break; 11024 11025 case PORT_FEATURE_LINK_SPEED_1G: 11026 if (bp->port.supported[idx] & 11027 SUPPORTED_1000baseT_Full) { 11028 bp->link_params.req_line_speed[idx] = 11029 SPEED_1000; 11030 bp->port.advertising[idx] |= 11031 (ADVERTISED_1000baseT_Full | 11032 ADVERTISED_TP); 11033 } else { 11034 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11035 link_config, 11036 bp->link_params.speed_cap_mask[idx]); 11037 return; 11038 } 11039 break; 11040 11041 case PORT_FEATURE_LINK_SPEED_2_5G: 11042 if (bp->port.supported[idx] & 11043 SUPPORTED_2500baseX_Full) { 11044 bp->link_params.req_line_speed[idx] = 11045 SPEED_2500; 11046 bp->port.advertising[idx] |= 11047 (ADVERTISED_2500baseX_Full | 11048 ADVERTISED_TP); 11049 } else { 11050 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11051 link_config, 11052 bp->link_params.speed_cap_mask[idx]); 11053 return; 11054 } 11055 break; 11056 11057 case PORT_FEATURE_LINK_SPEED_10G_CX4: 11058 if (bp->port.supported[idx] & 11059 SUPPORTED_10000baseT_Full) { 11060 bp->link_params.req_line_speed[idx] = 11061 SPEED_10000; 11062 bp->port.advertising[idx] |= 11063 (ADVERTISED_10000baseT_Full | 11064 ADVERTISED_FIBRE); 11065 } else { 11066 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11067 link_config, 11068 bp->link_params.speed_cap_mask[idx]); 11069 return; 11070 } 11071 break; 11072 case PORT_FEATURE_LINK_SPEED_20G: 11073 bp->link_params.req_line_speed[idx] = SPEED_20000; 11074 11075 break; 11076 default: 11077 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n", 11078 link_config); 11079 bp->link_params.req_line_speed[idx] = 11080 SPEED_AUTO_NEG; 11081 bp->port.advertising[idx] = 11082 bp->port.supported[idx]; 11083 break; 11084 } 11085 11086 bp->link_params.req_flow_ctrl[idx] = (link_config & 11087 PORT_FEATURE_FLOW_CONTROL_MASK); 11088 if (bp->link_params.req_flow_ctrl[idx] == 11089 BNX2X_FLOW_CTRL_AUTO) { 11090 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) 11091 bp->link_params.req_flow_ctrl[idx] = 11092 BNX2X_FLOW_CTRL_NONE; 11093 else 11094 bnx2x_set_requested_fc(bp); 11095 } 11096 11097 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 11098 bp->link_params.req_line_speed[idx], 11099 bp->link_params.req_duplex[idx], 11100 bp->link_params.req_flow_ctrl[idx], 11101 bp->port.advertising[idx]); 11102 } 11103 } 11104 11105 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 11106 { 11107 __be16 mac_hi_be = cpu_to_be16(mac_hi); 11108 __be32 mac_lo_be = cpu_to_be32(mac_lo); 11109 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); 11110 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); 11111 } 11112 11113 static void bnx2x_get_port_hwinfo(struct bnx2x *bp) 11114 { 11115 int port = BP_PORT(bp); 11116 u32 config; 11117 u32 ext_phy_type, ext_phy_config, eee_mode; 11118 11119 bp->link_params.bp = bp; 11120 bp->link_params.port = port; 11121 11122 bp->link_params.lane_config = 11123 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 11124 11125 bp->link_params.speed_cap_mask[0] = 11126 SHMEM_RD(bp, 11127 dev_info.port_hw_config[port].speed_capability_mask) & 11128 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 11129 bp->link_params.speed_cap_mask[1] = 11130 SHMEM_RD(bp, 11131 dev_info.port_hw_config[port].speed_capability_mask2) & 11132 
PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 11133 bp->port.link_config[0] = 11134 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 11135 11136 bp->port.link_config[1] = 11137 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); 11138 11139 bp->link_params.multi_phy_config = 11140 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 11141 /* If the device is capable of WoL, set the default state according 11142 * to the HW 11143 */ 11144 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 11145 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 11146 (config & PORT_FEATURE_WOL_ENABLED)); 11147 11148 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 11149 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) 11150 bp->flags |= NO_ISCSI_FLAG; 11151 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 11152 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) 11153 bp->flags |= NO_FCOE_FLAG; 11154 11155 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 11156 bp->link_params.lane_config, 11157 bp->link_params.speed_cap_mask[0], 11158 bp->port.link_config[0]); 11159 11160 bp->link_params.switch_cfg = (bp->port.link_config[0] & 11161 PORT_FEATURE_CONNECTED_SWITCH_MASK); 11162 bnx2x_phy_probe(&bp->link_params); 11163 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 11164 11165 bnx2x_link_settings_requested(bp); 11166 11167 /* 11168 * If connected directly, work with the internal PHY, otherwise, work 11169 * with the external PHY 11170 */ 11171 ext_phy_config = 11172 SHMEM_RD(bp, 11173 dev_info.port_hw_config[port].external_phy_config); 11174 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 11175 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 11176 bp->mdio.prtad = bp->port.phy_addr; 11177 11178 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 11179 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 11180 bp->mdio.prtad = 11181 XGXS_EXT_PHY_ADDR(ext_phy_config); 11182 11183 /* Configure link feature according to nvram value */ 11184 eee_mode = (((SHMEM_RD(bp, dev_info. 11185 port_feature_config[port].eee_power_mode)) & 11186 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 11187 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 11188 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 11189 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | 11190 EEE_MODE_ENABLE_LPI | 11191 EEE_MODE_OUTPUT_TIME; 11192 } else { 11193 bp->link_params.eee_mode = 0; 11194 } 11195 } 11196 11197 void bnx2x_get_iscsi_info(struct bnx2x *bp) 11198 { 11199 u32 no_flags = NO_ISCSI_FLAG; 11200 int port = BP_PORT(bp); 11201 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 11202 drv_lic_key[port].max_iscsi_conn); 11203 11204 if (!CNIC_SUPPORT(bp)) { 11205 bp->flags |= no_flags; 11206 return; 11207 } 11208 11209 /* Get the number of maximum allowed iSCSI connections */ 11210 bp->cnic_eth_dev.max_iscsi_conn = 11211 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 11212 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; 11213 11214 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", 11215 bp->cnic_eth_dev.max_iscsi_conn); 11216 11217 /* 11218 * If maximum allowed number of connections is zero - 11219 * disable the feature. 
11220 */ 11221 if (!bp->cnic_eth_dev.max_iscsi_conn) 11222 bp->flags |= no_flags; 11223 } 11224 11225 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 11226 { 11227 /* Port info */ 11228 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 11229 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); 11230 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 11231 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); 11232 11233 /* Node info */ 11234 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 11235 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); 11236 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 11237 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 11238 } 11239 11240 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) 11241 { 11242 u8 count = 0; 11243 11244 if (IS_MF(bp)) { 11245 u8 fid; 11246 11247 /* iterate over absolute function ids for this path: */ 11248 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { 11249 if (IS_MF_SD(bp)) { 11250 u32 cfg = MF_CFG_RD(bp, 11251 func_mf_config[fid].config); 11252 11253 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && 11254 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == 11255 FUNC_MF_CFG_PROTOCOL_FCOE)) 11256 count++; 11257 } else { 11258 u32 cfg = MF_CFG_RD(bp, 11259 func_ext_config[fid]. 11260 func_cfg); 11261 11262 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && 11263 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) 11264 count++; 11265 } 11266 } 11267 } else { /* SF */ 11268 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1; 11269 11270 for (port = 0; port < port_cnt; port++) { 11271 u32 lic = SHMEM_RD(bp, 11272 drv_lic_key[port].max_fcoe_conn) ^ 11273 FW_ENCODE_32BIT_PATTERN; 11274 if (lic) 11275 count++; 11276 } 11277 } 11278 11279 return count; 11280 } 11281 11282 static void bnx2x_get_fcoe_info(struct bnx2x *bp) 11283 { 11284 int port = BP_PORT(bp); 11285 int func = BP_ABS_FUNC(bp); 11286 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 11287 drv_lic_key[port].max_fcoe_conn); 11288 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); 11289 11290 if (!CNIC_SUPPORT(bp)) { 11291 bp->flags |= NO_FCOE_FLAG; 11292 return; 11293 } 11294 11295 /* Get the number of maximum allowed FCoE connections */ 11296 bp->cnic_eth_dev.max_fcoe_conn = 11297 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 11298 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 11299 11300 /* Calculate the number of maximum allowed FCoE tasks */ 11301 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; 11302 11303 /* check if FCoE resources must be shared between different functions */ 11304 if (num_fcoe_func) 11305 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; 11306 11307 /* Read the WWN: */ 11308 if (!IS_MF(bp)) { 11309 /* Port info */ 11310 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 11311 SHMEM_RD(bp, 11312 dev_info.port_hw_config[port]. 11313 fcoe_wwn_port_name_upper); 11314 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 11315 SHMEM_RD(bp, 11316 dev_info.port_hw_config[port]. 11317 fcoe_wwn_port_name_lower); 11318 11319 /* Node info */ 11320 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 11321 SHMEM_RD(bp, 11322 dev_info.port_hw_config[port]. 11323 fcoe_wwn_node_name_upper); 11324 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 11325 SHMEM_RD(bp, 11326 dev_info.port_hw_config[port]. 11327 fcoe_wwn_node_name_lower); 11328 } else if (!IS_MF_SD(bp)) { 11329 /* 11330 * Read the WWN info only if the FCoE feature is enabled for 11331 * this function. 
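 * (The WWNs come from the func_ext_config area of the MF configuration;
 * see bnx2x_get_ext_wwn_info() above.)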
11332 */ 11333 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) 11334 bnx2x_get_ext_wwn_info(bp, func); 11335 11336 } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) { 11337 bnx2x_get_ext_wwn_info(bp, func); 11338 } 11339 11340 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); 11341 11342 /* 11343 * If maximum allowed number of connections is zero - 11344 * disable the feature. 11345 */ 11346 if (!bp->cnic_eth_dev.max_fcoe_conn) 11347 bp->flags |= NO_FCOE_FLAG; 11348 } 11349 11350 static void bnx2x_get_cnic_info(struct bnx2x *bp) 11351 { 11352 /* 11353 * iSCSI may be dynamically disabled but reading 11354 * info here we will decrease memory usage by driver 11355 * if the feature is disabled for good 11356 */ 11357 bnx2x_get_iscsi_info(bp); 11358 bnx2x_get_fcoe_info(bp); 11359 } 11360 11361 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) 11362 { 11363 u32 val, val2; 11364 int func = BP_ABS_FUNC(bp); 11365 int port = BP_PORT(bp); 11366 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; 11367 u8 *fip_mac = bp->fip_mac; 11368 11369 if (IS_MF(bp)) { 11370 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or 11371 * FCoE MAC then the appropriate feature should be disabled. 11372 * In non SD mode features configuration comes from struct 11373 * func_ext_config. 11374 */ 11375 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) { 11376 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 11377 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 11378 val2 = MF_CFG_RD(bp, func_ext_config[func]. 11379 iscsi_mac_addr_upper); 11380 val = MF_CFG_RD(bp, func_ext_config[func]. 11381 iscsi_mac_addr_lower); 11382 bnx2x_set_mac_buf(iscsi_mac, val, val2); 11383 BNX2X_DEV_INFO 11384 ("Read iSCSI MAC: %pM\n", iscsi_mac); 11385 } else { 11386 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11387 } 11388 11389 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 11390 val2 = MF_CFG_RD(bp, func_ext_config[func]. 11391 fcoe_mac_addr_upper); 11392 val = MF_CFG_RD(bp, func_ext_config[func]. 11393 fcoe_mac_addr_lower); 11394 bnx2x_set_mac_buf(fip_mac, val, val2); 11395 BNX2X_DEV_INFO 11396 ("Read FCoE L2 MAC: %pM\n", fip_mac); 11397 } else { 11398 bp->flags |= NO_FCOE_FLAG; 11399 } 11400 11401 bp->mf_ext_config = cfg; 11402 11403 } else { /* SD MODE */ 11404 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 11405 /* use primary mac as iscsi mac */ 11406 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); 11407 11408 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 11409 BNX2X_DEV_INFO 11410 ("Read iSCSI MAC: %pM\n", iscsi_mac); 11411 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { 11412 /* use primary mac as fip mac */ 11413 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); 11414 BNX2X_DEV_INFO("SD FCoE MODE\n"); 11415 BNX2X_DEV_INFO 11416 ("Read FIP MAC: %pM\n", fip_mac); 11417 } 11418 } 11419 11420 /* If this is a storage-only interface, use SAN mac as 11421 * primary MAC. Notice that for SD this is already the case, 11422 * as the SAN mac was copied from the primary MAC. 11423 */ 11424 if (IS_MF_FCOE_AFEX(bp)) 11425 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 11426 } else { 11427 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11428 iscsi_mac_upper); 11429 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11430 iscsi_mac_lower); 11431 bnx2x_set_mac_buf(iscsi_mac, val, val2); 11432 11433 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11434 fcoe_fip_mac_upper); 11435 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 
11436 fcoe_fip_mac_lower); 11437 bnx2x_set_mac_buf(fip_mac, val, val2); 11438 } 11439 11440 /* Disable iSCSI OOO if MAC configuration is invalid. */ 11441 if (!is_valid_ether_addr(iscsi_mac)) { 11442 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11443 memset(iscsi_mac, 0, ETH_ALEN); 11444 } 11445 11446 /* Disable FCoE if MAC configuration is invalid. */ 11447 if (!is_valid_ether_addr(fip_mac)) { 11448 bp->flags |= NO_FCOE_FLAG; 11449 memset(bp->fip_mac, 0, ETH_ALEN); 11450 } 11451 } 11452 11453 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) 11454 { 11455 u32 val, val2; 11456 int func = BP_ABS_FUNC(bp); 11457 int port = BP_PORT(bp); 11458 11459 /* Zero primary MAC configuration */ 11460 memset(bp->dev->dev_addr, 0, ETH_ALEN); 11461 11462 if (BP_NOMCP(bp)) { 11463 BNX2X_ERROR("warning: random MAC workaround active\n"); 11464 eth_hw_addr_random(bp->dev); 11465 } else if (IS_MF(bp)) { 11466 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 11467 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); 11468 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 11469 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) 11470 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11471 11472 if (CNIC_SUPPORT(bp)) 11473 bnx2x_get_cnic_mac_hwinfo(bp); 11474 } else { 11475 /* in SF read MACs from port configuration */ 11476 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11477 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11478 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11479 11480 if (CNIC_SUPPORT(bp)) 11481 bnx2x_get_cnic_mac_hwinfo(bp); 11482 } 11483 11484 if (!BP_NOMCP(bp)) { 11485 /* Read physical port identifier from shmem */ 11486 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11487 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11488 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); 11489 bp->flags |= HAS_PHYS_PORT_ID; 11490 } 11491 11492 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 11493 11494 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) 11495 dev_err(&bp->pdev->dev, 11496 "bad Ethernet MAC address configuration: %pM\n" 11497 "change it manually before bringing up the appropriate network interface\n", 11498 bp->dev->dev_addr); 11499 } 11500 11501 static bool bnx2x_get_dropless_info(struct bnx2x *bp) 11502 { 11503 int tmp; 11504 u32 cfg; 11505 11506 if (IS_VF(bp)) 11507 return 0; 11508 11509 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { 11510 /* Take function: tmp = func */ 11511 tmp = BP_ABS_FUNC(bp); 11512 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); 11513 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING); 11514 } else { 11515 /* Take port: tmp = port */ 11516 tmp = BP_PORT(bp); 11517 cfg = SHMEM_RD(bp, 11518 dev_info.port_hw_config[tmp].generic_features); 11519 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED); 11520 } 11521 return cfg; 11522 } 11523 11524 static int bnx2x_get_hwinfo(struct bnx2x *bp) 11525 { 11526 int /*abs*/func = BP_ABS_FUNC(bp); 11527 int vn; 11528 u32 val = 0; 11529 int rc = 0; 11530 11531 bnx2x_get_common_hwinfo(bp); 11532 11533 /* 11534 * initialize IGU parameters 11535 */ 11536 if (CHIP_IS_E1x(bp)) { 11537 bp->common.int_block = INT_BLOCK_HC; 11538 11539 bp->igu_dsb_id = DEF_SB_IGU_ID; 11540 bp->igu_base_sb = 0; 11541 } else { 11542 bp->common.int_block = INT_BLOCK_IGU; 11543 11544 /* do not allow device reset during IGU info processing */ 11545 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11546 11547 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 11548 11549 if (val & 
IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 11550 int tout = 5000; 11551 11552 BNX2X_DEV_INFO("FORCING Normal Mode\n"); 11553 11554 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 11555 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); 11556 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); 11557 11558 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 11559 tout--; 11560 usleep_range(1000, 2000); 11561 } 11562 11563 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 11564 dev_err(&bp->pdev->dev, 11565 "FORCING Normal Mode failed!!!\n"); 11566 bnx2x_release_hw_lock(bp, 11567 HW_LOCK_RESOURCE_RESET); 11568 return -EPERM; 11569 } 11570 } 11571 11572 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 11573 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); 11574 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; 11575 } else 11576 BNX2X_DEV_INFO("IGU Normal Mode\n"); 11577 11578 rc = bnx2x_get_igu_cam_info(bp); 11579 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11580 if (rc) 11581 return rc; 11582 } 11583 11584 /* 11585 * set base FW non-default (fast path) status block id, this value is 11586 * used to initialize the fw_sb_id saved on the fp/queue structure to 11587 * determine the id used by the FW. 11588 */ 11589 if (CHIP_IS_E1x(bp)) 11590 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); 11591 else /* 11592 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of 11593 * the same queue are indicated on the same IGU SB). So we prefer 11594 * FW and IGU SBs to be the same value. 11595 */ 11596 bp->base_fw_ndsb = bp->igu_base_sb; 11597 11598 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" 11599 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, 11600 bp->igu_sb_cnt, bp->base_fw_ndsb); 11601 11602 /* 11603 * Initialize MF configuration 11604 */ 11605 11606 bp->mf_ov = 0; 11607 bp->mf_mode = 0; 11608 vn = BP_VN(bp); 11609 11610 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 11611 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 11612 bp->common.shmem2_base, SHMEM2_RD(bp, size), 11613 (u32)offsetof(struct shmem2_region, mf_cfg_addr)); 11614 11615 if (SHMEM2_HAS(bp, mf_cfg_addr)) 11616 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 11617 else 11618 bp->common.mf_cfg_base = bp->common.shmem_base + 11619 offsetof(struct shmem_region, func_mb) + 11620 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 11621 /* 11622 * get mf configuration: 11623 * 1. Existence of MF configuration 11624 * 2. MAC address must be legal (check only upper bytes) 11625 * for Switch-Independent mode; 11626 * OVLAN must be legal for Switch-Dependent mode 11627 * 3. SF_MODE configures specific MF mode 11628 */ 11629 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 11630 /* get mf configuration */ 11631 val = SHMEM_RD(bp, 11632 dev_info.shared_feature_config.config); 11633 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; 11634 11635 switch (val) { 11636 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 11637 val = MF_CFG_RD(bp, func_mf_config[func]. 11638 mac_upper); 11639 /* check for legal mac (upper bytes)*/ 11640 if (val != 0xffff) { 11641 bp->mf_mode = MULTI_FUNCTION_SI; 11642 bp->mf_config[vn] = MF_CFG_RD(bp, 11643 func_mf_config[func].config); 11644 } else 11645 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 11646 break; 11647 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 11648 if ((!CHIP_IS_E1x(bp)) && 11649 (MF_CFG_RD(bp, func_mf_config[func]. 
11650 mac_upper) != 0xffff) && 11651 (SHMEM2_HAS(bp, 11652 afex_driver_support))) { 11653 bp->mf_mode = MULTI_FUNCTION_AFEX; 11654 bp->mf_config[vn] = MF_CFG_RD(bp, 11655 func_mf_config[func].config); 11656 } else { 11657 BNX2X_DEV_INFO("can not configure afex mode\n"); 11658 } 11659 break; 11660 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 11661 /* get OV configuration */ 11662 val = MF_CFG_RD(bp, 11663 func_mf_config[FUNC_0].e1hov_tag); 11664 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 11665 11666 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 11667 bp->mf_mode = MULTI_FUNCTION_SD; 11668 bp->mf_config[vn] = MF_CFG_RD(bp, 11669 func_mf_config[func].config); 11670 } else 11671 BNX2X_DEV_INFO("illegal OV for SD\n"); 11672 break; 11673 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 11674 bp->mf_config[vn] = 0; 11675 break; 11676 default: 11677 /* Unknown configuration: reset mf_config */ 11678 bp->mf_config[vn] = 0; 11679 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); 11680 } 11681 } 11682 11683 BNX2X_DEV_INFO("%s function mode\n", 11684 IS_MF(bp) ? "multi" : "single"); 11685 11686 switch (bp->mf_mode) { 11687 case MULTI_FUNCTION_SD: 11688 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 11689 FUNC_MF_CFG_E1HOV_TAG_MASK; 11690 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 11691 bp->mf_ov = val; 11692 bp->path_has_ovlan = true; 11693 11694 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", 11695 func, bp->mf_ov, bp->mf_ov); 11696 } else { 11697 dev_err(&bp->pdev->dev, 11698 "No valid MF OV for func %d, aborting\n", 11699 func); 11700 return -EPERM; 11701 } 11702 break; 11703 case MULTI_FUNCTION_AFEX: 11704 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); 11705 break; 11706 case MULTI_FUNCTION_SI: 11707 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 11708 func); 11709 break; 11710 default: 11711 if (vn) { 11712 dev_err(&bp->pdev->dev, 11713 "VN %d is in a single function mode, aborting\n", 11714 vn); 11715 return -EPERM; 11716 } 11717 break; 11718 } 11719 11720 /* check if other port on the path needs ovlan: 11721 * Since MF configuration is shared between ports 11722 * Possible mixed modes are only 11723 * {SF, SI} {SF, SD} {SD, SF} {SI, SF} 11724 */ 11725 if (CHIP_MODE_IS_4_PORT(bp) && 11726 !bp->path_has_ovlan && 11727 !IS_MF(bp) && 11728 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 11729 u8 other_port = !BP_PORT(bp); 11730 u8 other_func = BP_PATH(bp) + 2*other_port; 11731 val = MF_CFG_RD(bp, 11732 func_mf_config[other_func].e1hov_tag); 11733 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 11734 bp->path_has_ovlan = true; 11735 } 11736 } 11737 11738 /* adjust igu_sb_cnt to MF for E1H */ 11739 if (CHIP_IS_E1H(bp) && IS_MF(bp)) 11740 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); 11741 11742 /* port info */ 11743 bnx2x_get_port_hwinfo(bp); 11744 11745 /* Get MAC addresses */ 11746 bnx2x_get_mac_hwinfo(bp); 11747 11748 bnx2x_get_cnic_info(bp); 11749 11750 return rc; 11751 } 11752 11753 static void bnx2x_read_fwinfo(struct bnx2x *bp) 11754 { 11755 int cnt, i, block_end, rodi; 11756 char vpd_start[BNX2X_VPD_LEN+1]; 11757 char str_id_reg[VENDOR_ID_LEN+1]; 11758 char str_id_cap[VENDOR_ID_LEN+1]; 11759 char *vpd_data; 11760 char *vpd_extended_data = NULL; 11761 u8 len; 11762 11763 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); 11764 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); 11765 11766 if (cnt < BNX2X_VPD_LEN) 11767 goto out_not_found; 11768 11769 /* VPD RO tag should be first tag after identifier string, hence 11770 * we should be able to find it in 
first BNX2X_VPD_LEN chars 11771 */ 11772 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, 11773 PCI_VPD_LRDT_RO_DATA); 11774 if (i < 0) 11775 goto out_not_found; 11776 11777 block_end = i + PCI_VPD_LRDT_TAG_SIZE + 11778 pci_vpd_lrdt_size(&vpd_start[i]); 11779 11780 i += PCI_VPD_LRDT_TAG_SIZE; 11781 11782 if (block_end > BNX2X_VPD_LEN) { 11783 vpd_extended_data = kmalloc(block_end, GFP_KERNEL); 11784 if (vpd_extended_data == NULL) 11785 goto out_not_found; 11786 11787 /* read rest of vpd image into vpd_extended_data */ 11788 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); 11789 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, 11790 block_end - BNX2X_VPD_LEN, 11791 vpd_extended_data + BNX2X_VPD_LEN); 11792 if (cnt < (block_end - BNX2X_VPD_LEN)) 11793 goto out_not_found; 11794 vpd_data = vpd_extended_data; 11795 } else 11796 vpd_data = vpd_start; 11797 11798 /* now vpd_data holds full vpd content in both cases */ 11799 11800 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 11801 PCI_VPD_RO_KEYWORD_MFR_ID); 11802 if (rodi < 0) 11803 goto out_not_found; 11804 11805 len = pci_vpd_info_field_size(&vpd_data[rodi]); 11806 11807 if (len != VENDOR_ID_LEN) 11808 goto out_not_found; 11809 11810 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 11811 11812 /* vendor specific info */ 11813 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); 11814 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); 11815 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || 11816 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { 11817 11818 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 11819 PCI_VPD_RO_KEYWORD_VENDOR0); 11820 if (rodi >= 0) { 11821 len = pci_vpd_info_field_size(&vpd_data[rodi]); 11822 11823 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 11824 11825 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { 11826 memcpy(bp->fw_ver, &vpd_data[rodi], len); 11827 bp->fw_ver[len] = ' '; 11828 } 11829 } 11830 kfree(vpd_extended_data); 11831 return; 11832 } 11833 out_not_found: 11834 kfree(vpd_extended_data); 11835 return; 11836 } 11837 11838 static void bnx2x_set_modes_bitmap(struct bnx2x *bp) 11839 { 11840 u32 flags = 0; 11841 11842 if (CHIP_REV_IS_FPGA(bp)) 11843 SET_FLAGS(flags, MODE_FPGA); 11844 else if (CHIP_REV_IS_EMUL(bp)) 11845 SET_FLAGS(flags, MODE_EMUL); 11846 else 11847 SET_FLAGS(flags, MODE_ASIC); 11848 11849 if (CHIP_MODE_IS_4_PORT(bp)) 11850 SET_FLAGS(flags, MODE_PORT4); 11851 else 11852 SET_FLAGS(flags, MODE_PORT2); 11853 11854 if (CHIP_IS_E2(bp)) 11855 SET_FLAGS(flags, MODE_E2); 11856 else if (CHIP_IS_E3(bp)) { 11857 SET_FLAGS(flags, MODE_E3); 11858 if (CHIP_REV(bp) == CHIP_REV_Ax) 11859 SET_FLAGS(flags, MODE_E3_A0); 11860 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 11861 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 11862 } 11863 11864 if (IS_MF(bp)) { 11865 SET_FLAGS(flags, MODE_MF); 11866 switch (bp->mf_mode) { 11867 case MULTI_FUNCTION_SD: 11868 SET_FLAGS(flags, MODE_MF_SD); 11869 break; 11870 case MULTI_FUNCTION_SI: 11871 SET_FLAGS(flags, MODE_MF_SI); 11872 break; 11873 case MULTI_FUNCTION_AFEX: 11874 SET_FLAGS(flags, MODE_MF_AFEX); 11875 break; 11876 } 11877 } else 11878 SET_FLAGS(flags, MODE_SF); 11879 11880 #if defined(__LITTLE_ENDIAN) 11881 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 11882 #else /*(__BIG_ENDIAN)*/ 11883 SET_FLAGS(flags, MODE_BIG_ENDIAN); 11884 #endif 11885 INIT_MODE_FLAGS(bp) = flags; 11886 } 11887 11888 static int bnx2x_init_bp(struct bnx2x *bp) 11889 { 11890 int func; 11891 int rc; 11892 11893 mutex_init(&bp->port.phy_mutex); 11894 
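	/* fw_mb_mutex serializes driver <-> MCP mailbox commands;
	 * drv_info_mutex protects the driver-info data exchanged with the
	 * management firmware.
	 */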
mutex_init(&bp->fw_mb_mutex); 11895 mutex_init(&bp->drv_info_mutex); 11896 bp->drv_info_mng_owner = false; 11897 spin_lock_init(&bp->stats_lock); 11898 sema_init(&bp->stats_sema, 1); 11899 11900 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11901 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11902 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 11903 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); 11904 if (IS_PF(bp)) { 11905 rc = bnx2x_get_hwinfo(bp); 11906 if (rc) 11907 return rc; 11908 } else { 11909 eth_zero_addr(bp->dev->dev_addr); 11910 } 11911 11912 bnx2x_set_modes_bitmap(bp); 11913 11914 rc = bnx2x_alloc_mem_bp(bp); 11915 if (rc) 11916 return rc; 11917 11918 bnx2x_read_fwinfo(bp); 11919 11920 func = BP_FUNC(bp); 11921 11922 /* need to reset chip if undi was active */ 11923 if (IS_PF(bp) && !BP_NOMCP(bp)) { 11924 /* init fw_seq */ 11925 bp->fw_seq = 11926 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 11927 DRV_MSG_SEQ_NUMBER_MASK; 11928 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 11929 11930 rc = bnx2x_prev_unload(bp); 11931 if (rc) { 11932 bnx2x_free_mem_bp(bp); 11933 return rc; 11934 } 11935 } 11936 11937 if (CHIP_REV_IS_FPGA(bp)) 11938 dev_err(&bp->pdev->dev, "FPGA detected\n"); 11939 11940 if (BP_NOMCP(bp) && (func == 0)) 11941 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 11942 11943 bp->disable_tpa = disable_tpa; 11944 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 11945 /* Reduce memory usage in kdump environment by disabling TPA */ 11946 bp->disable_tpa |= reset_devices; 11947 11948 /* Set TPA flags */ 11949 if (bp->disable_tpa) { 11950 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 11951 bp->dev->features &= ~NETIF_F_LRO; 11952 } else { 11953 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 11954 bp->dev->features |= NETIF_F_LRO; 11955 } 11956 11957 if (CHIP_IS_E1(bp)) 11958 bp->dropless_fc = 0; 11959 else 11960 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); 11961 11962 bp->mrrs = mrrs; 11963 11964 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; 11965 if (IS_VF(bp)) 11966 bp->rx_ring_size = MAX_RX_AVAIL; 11967 11968 /* make sure that the numbers are in the right granularity */ 11969 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 11970 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 11971 11972 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 
			5*HZ : HZ;

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
	    SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
		bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
		bnx2x_dcbx_init_params(bp);
	} else {
		bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
	}

	if (CHIP_IS_E1x(bp))
		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
	else
		bp->cnic_base_cl_id = FP_SB_MAX_E2;

	/* multiple tx priority */
	if (IS_VF(bp))
		bp->max_cos = 1;
	else if (CHIP_IS_E1x(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
	else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
	else if (CHIP_IS_E3B0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
	else
		BNX2X_ERR("unknown chip %x revision %x\n",
			  CHIP_NUM(bp), CHIP_REV(bp));
	BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);

	/* We need at least one default status block for slow-path events,
	 * a second status block for the L2 queue, and a third status block
	 * for CNIC if supported.
	 */
	if (IS_VF(bp))
		bp->min_msix_vec_cnt = 1;
	else if (CNIC_SUPPORT(bp))
		bp->min_msix_vec_cnt = 3;
	else /* PF w/o cnic */
		bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);

	bp->dump_preset_idx = 1;

	return rc;
}

/****************************************************************************
 * General service functions
 ****************************************************************************/

/*
 * net_device service functions
 */

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	bp->stats_init = true;

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	/* If a parity error happened during the unload, then attentions
	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
	 * want the first function loaded on the current engine to
	 * complete the recovery.
	 * Parity recovery is only relevant for the PF driver.
	 */
	if (IS_PF(bp)) {
		int other_engine = BP_PATH(bp) ? 0 : 1;
		bool other_load_status, load_status;
		bool global = false;

		other_load_status = bnx2x_get_load_status(bp, other_engine);
		load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
		if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
		    bnx2x_chk_parity_attn(bp, &global, true)) {
			do {
				/* If there are attentions and they are in
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless of whether it will be this
				 * function that will complete the recovery
				 * or not.
				 */
				if (global)
					bnx2x_set_reset_global(bp);

				/* Only the first function on the current
				 * engine should try to recover in open. In
				 * case of attentions in global blocks only
				 * the first function in the chip should try
				 * to recover.
12073 */ 12074 if ((!load_status && 12075 (!global || !other_load_status)) && 12076 bnx2x_trylock_leader_lock(bp) && 12077 !bnx2x_leader_reset(bp)) { 12078 netdev_info(bp->dev, 12079 "Recovered in open\n"); 12080 break; 12081 } 12082 12083 /* recovery has failed... */ 12084 bnx2x_set_power_state(bp, PCI_D3hot); 12085 bp->recovery_state = BNX2X_RECOVERY_FAILED; 12086 12087 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" 12088 "If you still see this message after a few retries then power cycle is required.\n"); 12089 12090 return -EAGAIN; 12091 } while (0); 12092 } 12093 } 12094 12095 bp->recovery_state = BNX2X_RECOVERY_DONE; 12096 rc = bnx2x_nic_load(bp, LOAD_OPEN); 12097 if (rc) 12098 return rc; 12099 return 0; 12100 } 12101 12102 /* called with rtnl_lock */ 12103 static int bnx2x_close(struct net_device *dev) 12104 { 12105 struct bnx2x *bp = netdev_priv(dev); 12106 12107 /* Unload the driver, release IRQs */ 12108 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 12109 12110 return 0; 12111 } 12112 12113 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 12114 struct bnx2x_mcast_ramrod_params *p) 12115 { 12116 int mc_count = netdev_mc_count(bp->dev); 12117 struct bnx2x_mcast_list_elem *mc_mac = 12118 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); 12119 struct netdev_hw_addr *ha; 12120 12121 if (!mc_mac) 12122 return -ENOMEM; 12123 12124 INIT_LIST_HEAD(&p->mcast_list); 12125 12126 netdev_for_each_mc_addr(ha, bp->dev) { 12127 mc_mac->mac = bnx2x_mc_addr(ha); 12128 list_add_tail(&mc_mac->link, &p->mcast_list); 12129 mc_mac++; 12130 } 12131 12132 p->mcast_list_len = mc_count; 12133 12134 return 0; 12135 } 12136 12137 static void bnx2x_free_mcast_macs_list( 12138 struct bnx2x_mcast_ramrod_params *p) 12139 { 12140 struct bnx2x_mcast_list_elem *mc_mac = 12141 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, 12142 link); 12143 12144 WARN_ON(!mc_mac); 12145 kfree(mc_mac); 12146 } 12147 12148 /** 12149 * bnx2x_set_uc_list - configure a new unicast MACs list. 12150 * 12151 * @bp: driver handle 12152 * 12153 * We will use zero (0) as a MAC type for these MACs. 
12154 */ 12155 static int bnx2x_set_uc_list(struct bnx2x *bp) 12156 { 12157 int rc; 12158 struct net_device *dev = bp->dev; 12159 struct netdev_hw_addr *ha; 12160 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; 12161 unsigned long ramrod_flags = 0; 12162 12163 /* First schedule a cleanup up of old configuration */ 12164 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); 12165 if (rc < 0) { 12166 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); 12167 return rc; 12168 } 12169 12170 netdev_for_each_uc_addr(ha, dev) { 12171 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, 12172 BNX2X_UC_LIST_MAC, &ramrod_flags); 12173 if (rc == -EEXIST) { 12174 DP(BNX2X_MSG_SP, 12175 "Failed to schedule ADD operations: %d\n", rc); 12176 /* do not treat adding same MAC as error */ 12177 rc = 0; 12178 12179 } else if (rc < 0) { 12180 12181 BNX2X_ERR("Failed to schedule ADD operations: %d\n", 12182 rc); 12183 return rc; 12184 } 12185 } 12186 12187 /* Execute the pending commands */ 12188 __set_bit(RAMROD_CONT, &ramrod_flags); 12189 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, 12190 BNX2X_UC_LIST_MAC, &ramrod_flags); 12191 } 12192 12193 static int bnx2x_set_mc_list(struct bnx2x *bp) 12194 { 12195 struct net_device *dev = bp->dev; 12196 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 12197 int rc = 0; 12198 12199 rparam.mcast_obj = &bp->mcast_obj; 12200 12201 /* first, clear all configured multicast MACs */ 12202 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 12203 if (rc < 0) { 12204 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); 12205 return rc; 12206 } 12207 12208 /* then, configure a new MACs list */ 12209 if (netdev_mc_count(dev)) { 12210 rc = bnx2x_init_mcast_macs_list(bp, &rparam); 12211 if (rc) { 12212 BNX2X_ERR("Failed to create multicast MACs list: %d\n", 12213 rc); 12214 return rc; 12215 } 12216 12217 /* Now add the new MACs */ 12218 rc = bnx2x_config_mcast(bp, &rparam, 12219 BNX2X_MCAST_CMD_ADD); 12220 if (rc < 0) 12221 BNX2X_ERR("Failed to set a new multicast configuration: %d\n", 12222 rc); 12223 12224 bnx2x_free_mcast_macs_list(&rparam); 12225 } 12226 12227 return rc; 12228 } 12229 12230 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ 12231 static void bnx2x_set_rx_mode(struct net_device *dev) 12232 { 12233 struct bnx2x *bp = netdev_priv(dev); 12234 12235 if (bp->state != BNX2X_STATE_OPEN) { 12236 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 12237 return; 12238 } else { 12239 /* Schedule an SP task to handle rest of change */ 12240 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, 12241 NETIF_MSG_IFUP); 12242 } 12243 } 12244 12245 void bnx2x_set_rx_mode_inner(struct bnx2x *bp) 12246 { 12247 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 12248 12249 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); 12250 12251 netif_addr_lock_bh(bp->dev); 12252 12253 if (bp->dev->flags & IFF_PROMISC) { 12254 rx_mode = BNX2X_RX_MODE_PROMISC; 12255 } else if ((bp->dev->flags & IFF_ALLMULTI) || 12256 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && 12257 CHIP_IS_E1(bp))) { 12258 rx_mode = BNX2X_RX_MODE_ALLMULTI; 12259 } else { 12260 if (IS_PF(bp)) { 12261 /* some multicasts */ 12262 if (bnx2x_set_mc_list(bp) < 0) 12263 rx_mode = BNX2X_RX_MODE_ALLMULTI; 12264 12265 /* release bh lock, as bnx2x_set_uc_list might sleep */ 12266 netif_addr_unlock_bh(bp->dev); 12267 if (bnx2x_set_uc_list(bp) < 0) 12268 rx_mode = BNX2X_RX_MODE_PROMISC; 12269 netif_addr_lock_bh(bp->dev); 12270 } else { 
12271 /* configuring mcast to a vf involves sleeping (when we 12272 * wait for the pf's response). 12273 */ 12274 bnx2x_schedule_sp_rtnl(bp, 12275 BNX2X_SP_RTNL_VFPF_MCAST, 0); 12276 } 12277 } 12278 12279 bp->rx_mode = rx_mode; 12280 /* handle ISCSI SD mode */ 12281 if (IS_MF_ISCSI_SD(bp)) 12282 bp->rx_mode = BNX2X_RX_MODE_NONE; 12283 12284 /* Schedule the rx_mode command */ 12285 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 12286 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 12287 netif_addr_unlock_bh(bp->dev); 12288 return; 12289 } 12290 12291 if (IS_PF(bp)) { 12292 bnx2x_set_storm_rx_mode(bp); 12293 netif_addr_unlock_bh(bp->dev); 12294 } else { 12295 /* VF will need to request the PF to make this change, and so 12296 * the VF needs to release the bottom-half lock prior to the 12297 * request (as it will likely require sleep on the VF side) 12298 */ 12299 netif_addr_unlock_bh(bp->dev); 12300 bnx2x_vfpf_storm_rx_mode(bp); 12301 } 12302 } 12303 12304 /* called with rtnl_lock */ 12305 static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 12306 int devad, u16 addr) 12307 { 12308 struct bnx2x *bp = netdev_priv(netdev); 12309 u16 value; 12310 int rc; 12311 12312 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 12313 prtad, devad, addr); 12314 12315 /* The HW expects different devad if CL22 is used */ 12316 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 12317 12318 bnx2x_acquire_phy_lock(bp); 12319 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); 12320 bnx2x_release_phy_lock(bp); 12321 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 12322 12323 if (!rc) 12324 rc = value; 12325 return rc; 12326 } 12327 12328 /* called with rtnl_lock */ 12329 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, 12330 u16 addr, u16 value) 12331 { 12332 struct bnx2x *bp = netdev_priv(netdev); 12333 int rc; 12334 12335 DP(NETIF_MSG_LINK, 12336 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", 12337 prtad, devad, addr, value); 12338 12339 /* The HW expects different devad if CL22 is used */ 12340 devad = (devad == MDIO_DEVAD_NONE) ? 
DEFAULT_PHY_DEV_ADDR : devad; 12341 12342 bnx2x_acquire_phy_lock(bp); 12343 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); 12344 bnx2x_release_phy_lock(bp); 12345 return rc; 12346 } 12347 12348 /* called with rtnl_lock */ 12349 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 12350 { 12351 struct bnx2x *bp = netdev_priv(dev); 12352 struct mii_ioctl_data *mdio = if_mii(ifr); 12353 12354 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", 12355 mdio->phy_id, mdio->reg_num, mdio->val_in); 12356 12357 if (!netif_running(dev)) 12358 return -EAGAIN; 12359 12360 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 12361 } 12362 12363 #ifdef CONFIG_NET_POLL_CONTROLLER 12364 static void poll_bnx2x(struct net_device *dev) 12365 { 12366 struct bnx2x *bp = netdev_priv(dev); 12367 int i; 12368 12369 for_each_eth_queue(bp, i) { 12370 struct bnx2x_fastpath *fp = &bp->fp[i]; 12371 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 12372 } 12373 } 12374 #endif 12375 12376 static int bnx2x_validate_addr(struct net_device *dev) 12377 { 12378 struct bnx2x *bp = netdev_priv(dev); 12379 12380 /* query the bulletin board for mac address configured by the PF */ 12381 if (IS_VF(bp)) 12382 bnx2x_sample_bulletin(bp); 12383 12384 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { 12385 BNX2X_ERR("Non-valid Ethernet address\n"); 12386 return -EADDRNOTAVAIL; 12387 } 12388 return 0; 12389 } 12390 12391 static int bnx2x_get_phys_port_id(struct net_device *netdev, 12392 struct netdev_phys_port_id *ppid) 12393 { 12394 struct bnx2x *bp = netdev_priv(netdev); 12395 12396 if (!(bp->flags & HAS_PHYS_PORT_ID)) 12397 return -EOPNOTSUPP; 12398 12399 ppid->id_len = sizeof(bp->phys_port_id); 12400 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); 12401 12402 return 0; 12403 } 12404 12405 static const struct net_device_ops bnx2x_netdev_ops = { 12406 .ndo_open = bnx2x_open, 12407 .ndo_stop = bnx2x_close, 12408 .ndo_start_xmit = bnx2x_start_xmit, 12409 .ndo_select_queue = bnx2x_select_queue, 12410 .ndo_set_rx_mode = bnx2x_set_rx_mode, 12411 .ndo_set_mac_address = bnx2x_change_mac_addr, 12412 .ndo_validate_addr = bnx2x_validate_addr, 12413 .ndo_do_ioctl = bnx2x_ioctl, 12414 .ndo_change_mtu = bnx2x_change_mtu, 12415 .ndo_fix_features = bnx2x_fix_features, 12416 .ndo_set_features = bnx2x_set_features, 12417 .ndo_tx_timeout = bnx2x_tx_timeout, 12418 #ifdef CONFIG_NET_POLL_CONTROLLER 12419 .ndo_poll_controller = poll_bnx2x, 12420 #endif 12421 .ndo_setup_tc = bnx2x_setup_tc, 12422 #ifdef CONFIG_BNX2X_SRIOV 12423 .ndo_set_vf_mac = bnx2x_set_vf_mac, 12424 .ndo_set_vf_vlan = bnx2x_set_vf_vlan, 12425 .ndo_get_vf_config = bnx2x_get_vf_config, 12426 #endif 12427 #ifdef NETDEV_FCOE_WWNN 12428 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 12429 #endif 12430 12431 #ifdef CONFIG_NET_RX_BUSY_POLL 12432 .ndo_busy_poll = bnx2x_low_latency_recv, 12433 #endif 12434 .ndo_get_phys_port_id = bnx2x_get_phys_port_id, 12435 .ndo_set_vf_link_state = bnx2x_set_vf_link_state, 12436 }; 12437 12438 static int bnx2x_set_coherency_mask(struct bnx2x *bp) 12439 { 12440 struct device *dev = &bp->pdev->dev; 12441 12442 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && 12443 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { 12444 dev_err(dev, "System does not support DMA, aborting\n"); 12445 return -EIO; 12446 } 12447 12448 return 0; 12449 } 12450 12451 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) 12452 { 12453 if (bp->flags & AER_ENABLED) { 12454 pci_disable_pcie_error_reporting(bp->pdev); 
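		/* Clear the flag so that a later call (e.g. from the remove
		 * path) does not try to disable AER a second time.
		 */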
12455 bp->flags &= ~AER_ENABLED; 12456 } 12457 } 12458 12459 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, 12460 struct net_device *dev, unsigned long board_type) 12461 { 12462 int rc; 12463 u32 pci_cfg_dword; 12464 bool chip_is_e1x = (board_type == BCM57710 || 12465 board_type == BCM57711 || 12466 board_type == BCM57711E); 12467 12468 SET_NETDEV_DEV(dev, &pdev->dev); 12469 12470 bp->dev = dev; 12471 bp->pdev = pdev; 12472 12473 rc = pci_enable_device(pdev); 12474 if (rc) { 12475 dev_err(&bp->pdev->dev, 12476 "Cannot enable PCI device, aborting\n"); 12477 goto err_out; 12478 } 12479 12480 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 12481 dev_err(&bp->pdev->dev, 12482 "Cannot find PCI device base address, aborting\n"); 12483 rc = -ENODEV; 12484 goto err_out_disable; 12485 } 12486 12487 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 12488 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); 12489 rc = -ENODEV; 12490 goto err_out_disable; 12491 } 12492 12493 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword); 12494 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) == 12495 PCICFG_REVESION_ID_ERROR_VAL) { 12496 pr_err("PCI device error, probably due to fan failure, aborting\n"); 12497 rc = -ENODEV; 12498 goto err_out_disable; 12499 } 12500 12501 if (atomic_read(&pdev->enable_cnt) == 1) { 12502 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 12503 if (rc) { 12504 dev_err(&bp->pdev->dev, 12505 "Cannot obtain PCI resources, aborting\n"); 12506 goto err_out_disable; 12507 } 12508 12509 pci_set_master(pdev); 12510 pci_save_state(pdev); 12511 } 12512 12513 if (IS_PF(bp)) { 12514 if (!pdev->pm_cap) { 12515 dev_err(&bp->pdev->dev, 12516 "Cannot find power management capability, aborting\n"); 12517 rc = -EIO; 12518 goto err_out_release; 12519 } 12520 } 12521 12522 if (!pci_is_pcie(pdev)) { 12523 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); 12524 rc = -EIO; 12525 goto err_out_release; 12526 } 12527 12528 rc = bnx2x_set_coherency_mask(bp); 12529 if (rc) 12530 goto err_out_release; 12531 12532 dev->mem_start = pci_resource_start(pdev, 0); 12533 dev->base_addr = dev->mem_start; 12534 dev->mem_end = pci_resource_end(pdev, 0); 12535 12536 dev->irq = pdev->irq; 12537 12538 bp->regview = pci_ioremap_bar(pdev, 0); 12539 if (!bp->regview) { 12540 dev_err(&bp->pdev->dev, 12541 "Cannot map register space, aborting\n"); 12542 rc = -ENOMEM; 12543 goto err_out_release; 12544 } 12545 12546 /* In E1/E1H use pci device function given by kernel. 12547 * In E2/E3 read physical function from ME register since these chips 12548 * support Physical Device Assignment where kernel BDF maybe arbitrary 12549 * (depending on hypervisor). 
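 * In that case the ME register reports the absolute PF number assigned by
 * the hardware, which is read below.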
12550 */ 12551 if (chip_is_e1x) { 12552 bp->pf_num = PCI_FUNC(pdev->devfn); 12553 } else { 12554 /* chip is E2/3*/ 12555 pci_read_config_dword(bp->pdev, 12556 PCICFG_ME_REGISTER, &pci_cfg_dword); 12557 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> 12558 ME_REG_ABS_PF_NUM_SHIFT); 12559 } 12560 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); 12561 12562 /* clean indirect addresses */ 12563 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 12564 PCICFG_VENDOR_ID_OFFSET); 12565 12566 /* AER (Advanced Error reporting) configuration */ 12567 rc = pci_enable_pcie_error_reporting(pdev); 12568 if (!rc) 12569 bp->flags |= AER_ENABLED; 12570 else 12571 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc); 12572 12573 /* 12574 * Clean the following indirect addresses for all functions since it 12575 * is not used by the driver. 12576 */ 12577 if (IS_PF(bp)) { 12578 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 12579 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 12580 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 12581 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 12582 12583 if (chip_is_e1x) { 12584 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 12585 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 12586 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 12587 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 12588 } 12589 12590 /* Enable internal target-read (in case we are probed after PF 12591 * FLR). Must be done prior to any BAR read access. Only for 12592 * 57712 and up 12593 */ 12594 if (!chip_is_e1x) 12595 REG_WR(bp, 12596 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 12597 } 12598 12599 dev->watchdog_timeo = TX_TIMEOUT; 12600 12601 dev->netdev_ops = &bnx2x_netdev_ops; 12602 bnx2x_set_ethtool_ops(bp, dev); 12603 12604 dev->priv_flags |= IFF_UNICAST_FLT; 12605 12606 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 12607 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12608 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 12609 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; 12610 if (!CHIP_IS_E1x(bp)) { 12611 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | 12612 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; 12613 dev->hw_enc_features = 12614 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 12615 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12616 NETIF_F_GSO_IPIP | 12617 NETIF_F_GSO_SIT | 12618 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; 12619 } 12620 12621 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 12622 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 12623 12624 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 12625 dev->features |= NETIF_F_HIGHDMA; 12626 12627 /* Add Loopback capability to the device */ 12628 dev->hw_features |= NETIF_F_LOOPBACK; 12629 12630 #ifdef BCM_DCBNL 12631 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 12632 #endif 12633 12634 /* get_port_hwinfo() will set prtad and mmds properly */ 12635 bp->mdio.prtad = MDIO_PRTAD_NONE; 12636 bp->mdio.mmds = 0; 12637 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 12638 bp->mdio.dev = dev; 12639 bp->mdio.mdio_read = bnx2x_mdio_read; 12640 bp->mdio.mdio_write = bnx2x_mdio_write; 12641 12642 return 0; 12643 12644 err_out_release: 12645 if (atomic_read(&pdev->enable_cnt) == 1) 12646 pci_release_regions(pdev); 12647 12648 err_out_disable: 12649 pci_disable_device(pdev); 12650 12651 err_out: 12652 return rc; 12653 } 12654 12655 static int bnx2x_check_firmware(struct bnx2x *bp) 12656 { 12657 const struct firmware *firmware = bp->firmware; 12658 struct bnx2x_fw_file_hdr *fw_hdr; 12659 struct 
bnx2x_fw_file_section *sections; 12660 u32 offset, len, num_ops; 12661 __be16 *ops_offsets; 12662 int i; 12663 const u8 *fw_ver; 12664 12665 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { 12666 BNX2X_ERR("Wrong FW size\n"); 12667 return -EINVAL; 12668 } 12669 12670 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; 12671 sections = (struct bnx2x_fw_file_section *)fw_hdr; 12672 12673 /* Make sure none of the offsets and sizes make us read beyond 12674 * the end of the firmware data */ 12675 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { 12676 offset = be32_to_cpu(sections[i].offset); 12677 len = be32_to_cpu(sections[i].len); 12678 if (offset + len > firmware->size) { 12679 BNX2X_ERR("Section %d length is out of bounds\n", i); 12680 return -EINVAL; 12681 } 12682 } 12683 12684 /* Likewise for the init_ops offsets */ 12685 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); 12686 ops_offsets = (__force __be16 *)(firmware->data + offset); 12687 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); 12688 12689 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 12690 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 12691 BNX2X_ERR("Section offset %d is out of bounds\n", i); 12692 return -EINVAL; 12693 } 12694 } 12695 12696 /* Check FW version */ 12697 offset = be32_to_cpu(fw_hdr->fw_version.offset); 12698 fw_ver = firmware->data + offset; 12699 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || 12700 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || 12701 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || 12702 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { 12703 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", 12704 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], 12705 BCM_5710_FW_MAJOR_VERSION, 12706 BCM_5710_FW_MINOR_VERSION, 12707 BCM_5710_FW_REVISION_VERSION, 12708 BCM_5710_FW_ENGINEERING_VERSION); 12709 return -EINVAL; 12710 } 12711 12712 return 0; 12713 } 12714 12715 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 12716 { 12717 const __be32 *source = (const __be32 *)_source; 12718 u32 *target = (u32 *)_target; 12719 u32 i; 12720 12721 for (i = 0; i < n/4; i++) 12722 target[i] = be32_to_cpu(source[i]); 12723 } 12724 12725 /* 12726 Ops array is stored in the following format: 12727 {op(8bit), offset(24bit, big endian), data(32bit, big endian)} 12728 */ 12729 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) 12730 { 12731 const __be32 *source = (const __be32 *)_source; 12732 struct raw_op *target = (struct raw_op *)_target; 12733 u32 i, j, tmp; 12734 12735 for (i = 0, j = 0; i < n/8; i++, j += 2) { 12736 tmp = be32_to_cpu(source[j]); 12737 target[i].op = (tmp >> 24) & 0xff; 12738 target[i].offset = tmp & 0xffffff; 12739 target[i].raw_data = be32_to_cpu(source[j + 1]); 12740 } 12741 } 12742 12743 /* IRO array is stored in the following format: 12744 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 12745 */ 12746 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 12747 { 12748 const __be32 *source = (const __be32 *)_source; 12749 struct iro *target = (struct iro *)_target; 12750 u32 i, j, tmp; 12751 12752 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { 12753 target[i].base = be32_to_cpu(source[j]); 12754 j++; 12755 tmp = be32_to_cpu(source[j]); 12756 target[i].m1 = (tmp >> 16) & 0xffff; 12757 target[i].m2 = tmp & 0xffff; 12758 j++; 12759 tmp = be32_to_cpu(source[j]); 12760 target[i].m3 = (tmp >> 16) & 0xffff; 12761 target[i].size = tmp & 0xffff; 12762 j++; 12763 } 12764 
} 12765 12766 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 12767 { 12768 const __be16 *source = (const __be16 *)_source; 12769 u16 *target = (u16 *)_target; 12770 u32 i; 12771 12772 for (i = 0; i < n/2; i++) 12773 target[i] = be16_to_cpu(source[i]); 12774 } 12775 12776 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ 12777 do { \ 12778 u32 len = be32_to_cpu(fw_hdr->arr.len); \ 12779 bp->arr = kmalloc(len, GFP_KERNEL); \ 12780 if (!bp->arr) \ 12781 goto lbl; \ 12782 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ 12783 (u8 *)bp->arr, len); \ 12784 } while (0) 12785 12786 static int bnx2x_init_firmware(struct bnx2x *bp) 12787 { 12788 const char *fw_file_name; 12789 struct bnx2x_fw_file_hdr *fw_hdr; 12790 int rc; 12791 12792 if (bp->firmware) 12793 return 0; 12794 12795 if (CHIP_IS_E1(bp)) 12796 fw_file_name = FW_FILE_NAME_E1; 12797 else if (CHIP_IS_E1H(bp)) 12798 fw_file_name = FW_FILE_NAME_E1H; 12799 else if (!CHIP_IS_E1x(bp)) 12800 fw_file_name = FW_FILE_NAME_E2; 12801 else { 12802 BNX2X_ERR("Unsupported chip revision\n"); 12803 return -EINVAL; 12804 } 12805 BNX2X_DEV_INFO("Loading %s\n", fw_file_name); 12806 12807 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); 12808 if (rc) { 12809 BNX2X_ERR("Can't load firmware file %s\n", 12810 fw_file_name); 12811 goto request_firmware_exit; 12812 } 12813 12814 rc = bnx2x_check_firmware(bp); 12815 if (rc) { 12816 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); 12817 goto request_firmware_exit; 12818 } 12819 12820 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; 12821 12822 /* Initialize the pointers to the init arrays */ 12823 /* Blob */ 12824 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); 12825 12826 /* Opcodes */ 12827 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); 12828 12829 /* Offsets */ 12830 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, 12831 be16_to_cpu_n); 12832 12833 /* STORMs firmware */ 12834 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12835 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); 12836 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + 12837 be32_to_cpu(fw_hdr->tsem_pram_data.offset); 12838 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12839 be32_to_cpu(fw_hdr->usem_int_table_data.offset); 12840 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + 12841 be32_to_cpu(fw_hdr->usem_pram_data.offset); 12842 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12843 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); 12844 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + 12845 be32_to_cpu(fw_hdr->xsem_pram_data.offset); 12846 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12847 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 12848 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 12849 be32_to_cpu(fw_hdr->csem_pram_data.offset); 12850 /* IRO */ 12851 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); 12852 12853 return 0; 12854 12855 iro_alloc_err: 12856 kfree(bp->init_ops_offsets); 12857 init_offsets_alloc_err: 12858 kfree(bp->init_ops); 12859 init_ops_alloc_err: 12860 kfree(bp->init_data); 12861 request_firmware_exit: 12862 release_firmware(bp->firmware); 12863 bp->firmware = NULL; 12864 12865 return rc; 12866 } 12867 12868 static void bnx2x_release_firmware(struct bnx2x *bp) 12869 { 12870 kfree(bp->init_ops_offsets); 12871 kfree(bp->init_ops); 12872 kfree(bp->init_data); 12873 release_firmware(bp->firmware); 12874 bp->firmware = NULL; 12875 } 12876 12877 static struct bnx2x_func_sp_drv_ops 
bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn = bnx2x_init_hw_common,
	.init_hw_port = bnx2x_init_hw_port,
	.init_hw_func = bnx2x_init_hw_func,

	.reset_hw_cmn = bnx2x_reset_common,
	.reset_hw_port = bnx2x_reset_port,
	.reset_hw_func = bnx2x_reset_func,

	.gunzip_init = bnx2x_gunzip_init,
	.gunzip_end = bnx2x_gunzip_end,

	.init_fw = bnx2x_init_firmware,
	.release_fw = bnx2x_release_firmware,
};

void bnx2x__init_func_obj(struct bnx2x *bp)
{
	/* Prepare DMAE related driver resources */
	bnx2x_setup_dmae(bp);

	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    bnx2x_sp(bp, func_afex_rdata),
			    bnx2x_sp_mapping(bp, func_afex_rdata),
			    &bnx2x_func_sp_drv);
}

/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
	int cid_count = BNX2X_L2_MAX_CID(bp);

	if (IS_SRIOV(bp))
		cid_count += BNX2X_VF_CIDS;

	if (CNIC_SUPPORT(bp))
		cid_count += CNIC_CID_MAX;

	return roundup(cid_count, QM_CID_ROUND);
}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev:     pci device
 * @cnic_cnt: number of status blocks reserved for CNIC
 *
 */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
	int index;
	u16 control = 0;

	/*
	 * If MSI-X is not supported - return the number of SBs needed to
	 * support one fast path queue: one FP queue + SB for CNIC
	 */
	if (!pdev->msix_cap) {
		dev_info(&pdev->dev, "no msix capability found\n");
		return 1 + cnic_cnt;
	}
	dev_info(&pdev->dev, "msix capability found\n");

	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: the number of
	 * all SBs without the default SB.
	 * For VFs there is no default SB; the caller (bnx2x_init_one()) adds
	 * one to account for that.
12948 */ 12949 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); 12950 12951 index = control & PCI_MSIX_FLAGS_QSIZE; 12952 12953 return index; 12954 } 12955 12956 static int set_max_cos_est(int chip_id) 12957 { 12958 switch (chip_id) { 12959 case BCM57710: 12960 case BCM57711: 12961 case BCM57711E: 12962 return BNX2X_MULTI_TX_COS_E1X; 12963 case BCM57712: 12964 case BCM57712_MF: 12965 return BNX2X_MULTI_TX_COS_E2_E3A0; 12966 case BCM57800: 12967 case BCM57800_MF: 12968 case BCM57810: 12969 case BCM57810_MF: 12970 case BCM57840_4_10: 12971 case BCM57840_2_20: 12972 case BCM57840_O: 12973 case BCM57840_MFO: 12974 case BCM57840_MF: 12975 case BCM57811: 12976 case BCM57811_MF: 12977 return BNX2X_MULTI_TX_COS_E3B0; 12978 case BCM57712_VF: 12979 case BCM57800_VF: 12980 case BCM57810_VF: 12981 case BCM57840_VF: 12982 case BCM57811_VF: 12983 return 1; 12984 default: 12985 pr_err("Unknown board_type (%d), aborting\n", chip_id); 12986 return -ENODEV; 12987 } 12988 } 12989 12990 static int set_is_vf(int chip_id) 12991 { 12992 switch (chip_id) { 12993 case BCM57712_VF: 12994 case BCM57800_VF: 12995 case BCM57810_VF: 12996 case BCM57840_VF: 12997 case BCM57811_VF: 12998 return true; 12999 default: 13000 return false; 13001 } 13002 } 13003 13004 static int bnx2x_init_one(struct pci_dev *pdev, 13005 const struct pci_device_id *ent) 13006 { 13007 struct net_device *dev = NULL; 13008 struct bnx2x *bp; 13009 enum pcie_link_width pcie_width; 13010 enum pci_bus_speed pcie_speed; 13011 int rc, max_non_def_sbs; 13012 int rx_count, tx_count, rss_count, doorbell_size; 13013 int max_cos_est; 13014 bool is_vf; 13015 int cnic_cnt; 13016 13017 /* An estimated maximum supported CoS number according to the chip 13018 * version. 13019 * We will try to roughly estimate the maximum number of CoSes this chip 13020 * may support in order to minimize the memory allocated for Tx 13021 * netdev_queue's. This number will be accurately calculated during the 13022 * initialization of bp->max_cos based on the chip versions AND chip 13023 * revision in the bnx2x_init_bp(). 13024 */ 13025 max_cos_est = set_max_cos_est(ent->driver_data); 13026 if (max_cos_est < 0) 13027 return max_cos_est; 13028 is_vf = set_is_vf(ent->driver_data); 13029 cnic_cnt = is_vf ? 0 : 1; 13030 13031 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); 13032 13033 /* add another SB for VF as it has no default SB */ 13034 max_non_def_sbs += is_vf ? 1 : 0; 13035 13036 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 13037 rss_count = max_non_def_sbs - cnic_cnt; 13038 13039 if (rss_count < 1) 13040 return -EINVAL; 13041 13042 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ 13043 rx_count = rss_count + cnic_cnt; 13044 13045 /* Maximum number of netdev Tx queues: 13046 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 13047 */ 13048 tx_count = rss_count * max_cos_est + cnic_cnt; 13049 13050 /* dev zeroed in init_etherdev */ 13051 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 13052 if (!dev) 13053 return -ENOMEM; 13054 13055 bp = netdev_priv(dev); 13056 13057 bp->flags = 0; 13058 if (is_vf) 13059 bp->flags |= IS_VF_FLAG; 13060 13061 bp->igu_sb_cnt = max_non_def_sbs; 13062 bp->igu_base_addr = IS_VF(bp) ? 
PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; 13063 bp->msg_enable = debug; 13064 bp->cnic_support = cnic_cnt; 13065 bp->cnic_probe = bnx2x_cnic_probe; 13066 13067 pci_set_drvdata(pdev, dev); 13068 13069 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); 13070 if (rc < 0) { 13071 free_netdev(dev); 13072 return rc; 13073 } 13074 13075 BNX2X_DEV_INFO("This is a %s function\n", 13076 IS_PF(bp) ? "physical" : "virtual"); 13077 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off"); 13078 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs); 13079 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", 13080 tx_count, rx_count); 13081 13082 rc = bnx2x_init_bp(bp); 13083 if (rc) 13084 goto init_one_exit; 13085 13086 /* Map doorbells here as we need the real value of bp->max_cos which 13087 * is initialized in bnx2x_init_bp() to determine the number of 13088 * l2 connections. 13089 */ 13090 if (IS_VF(bp)) { 13091 bp->doorbells = bnx2x_vf_doorbells(bp); 13092 rc = bnx2x_vf_pci_alloc(bp); 13093 if (rc) 13094 goto init_one_exit; 13095 } else { 13096 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); 13097 if (doorbell_size > pci_resource_len(pdev, 2)) { 13098 dev_err(&bp->pdev->dev, 13099 "Cannot map doorbells, bar size too small, aborting\n"); 13100 rc = -ENOMEM; 13101 goto init_one_exit; 13102 } 13103 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 13104 doorbell_size); 13105 } 13106 if (!bp->doorbells) { 13107 dev_err(&bp->pdev->dev, 13108 "Cannot map doorbell space, aborting\n"); 13109 rc = -ENOMEM; 13110 goto init_one_exit; 13111 } 13112 13113 if (IS_VF(bp)) { 13114 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); 13115 if (rc) 13116 goto init_one_exit; 13117 } 13118 13119 /* Enable SRIOV if capability found in configuration space */ 13120 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); 13121 if (rc) 13122 goto init_one_exit; 13123 13124 /* calc qm_cid_count */ 13125 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); 13126 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); 13127 13128 /* disable FCOE L2 queue for E1x*/ 13129 if (CHIP_IS_E1x(bp)) 13130 bp->flags |= NO_FCOE_FLAG; 13131 13132 /* Set bp->num_queues for MSI-X mode*/ 13133 bnx2x_set_num_queues(bp); 13134 13135 /* Configure interrupt mode: try to enable MSI-X/MSI if 13136 * needed. 13137 */ 13138 rc = bnx2x_set_int_mode(bp); 13139 if (rc) { 13140 dev_err(&pdev->dev, "Cannot set interrupts\n"); 13141 goto init_one_exit; 13142 } 13143 BNX2X_DEV_INFO("set interrupts successfully\n"); 13144 13145 /* register the net device */ 13146 rc = register_netdev(dev); 13147 if (rc) { 13148 dev_err(&pdev->dev, "Cannot register net device\n"); 13149 goto init_one_exit; 13150 } 13151 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); 13152 13153 if (!NO_FCOE(bp)) { 13154 /* Add storage MAC address */ 13155 rtnl_lock(); 13156 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 13157 rtnl_unlock(); 13158 } 13159 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || 13160 pcie_speed == PCI_SPEED_UNKNOWN || 13161 pcie_width == PCIE_LNK_WIDTH_UNKNOWN) 13162 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); 13163 else 13164 BNX2X_DEV_INFO( 13165 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 13166 board_info[ent->driver_data].name, 13167 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 13168 pcie_width, 13169 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" : 13170 pcie_speed == PCIE_SPEED_5_0GT ? 
"5.0GHz" : 13171 pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" : 13172 "Unknown", 13173 dev->base_addr, bp->pdev->irq, dev->dev_addr); 13174 13175 return 0; 13176 13177 init_one_exit: 13178 bnx2x_disable_pcie_error_reporting(bp); 13179 13180 if (bp->regview) 13181 iounmap(bp->regview); 13182 13183 if (IS_PF(bp) && bp->doorbells) 13184 iounmap(bp->doorbells); 13185 13186 free_netdev(dev); 13187 13188 if (atomic_read(&pdev->enable_cnt) == 1) 13189 pci_release_regions(pdev); 13190 13191 pci_disable_device(pdev); 13192 13193 return rc; 13194 } 13195 13196 static void __bnx2x_remove(struct pci_dev *pdev, 13197 struct net_device *dev, 13198 struct bnx2x *bp, 13199 bool remove_netdev) 13200 { 13201 /* Delete storage MAC address */ 13202 if (!NO_FCOE(bp)) { 13203 rtnl_lock(); 13204 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 13205 rtnl_unlock(); 13206 } 13207 13208 #ifdef BCM_DCBNL 13209 /* Delete app tlvs from dcbnl */ 13210 bnx2x_dcbnl_update_applist(bp, true); 13211 #endif 13212 13213 if (IS_PF(bp) && 13214 !BP_NOMCP(bp) && 13215 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) 13216 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); 13217 13218 /* Close the interface - either directly or implicitly */ 13219 if (remove_netdev) { 13220 unregister_netdev(dev); 13221 } else { 13222 rtnl_lock(); 13223 dev_close(dev); 13224 rtnl_unlock(); 13225 } 13226 13227 bnx2x_iov_remove_one(bp); 13228 13229 /* Power on: we can't let PCI layer write to us while we are in D3 */ 13230 if (IS_PF(bp)) 13231 bnx2x_set_power_state(bp, PCI_D0); 13232 13233 /* Disable MSI/MSI-X */ 13234 bnx2x_disable_msi(bp); 13235 13236 /* Power off */ 13237 if (IS_PF(bp)) 13238 bnx2x_set_power_state(bp, PCI_D3hot); 13239 13240 /* Make sure RESET task is not scheduled before continuing */ 13241 cancel_delayed_work_sync(&bp->sp_rtnl_task); 13242 13243 /* send message via vfpf channel to release the resources of this vf */ 13244 if (IS_VF(bp)) 13245 bnx2x_vfpf_release(bp); 13246 13247 /* Assumes no further PCIe PM changes will occur */ 13248 if (system_state == SYSTEM_POWER_OFF) { 13249 pci_wake_from_d3(pdev, bp->wol); 13250 pci_set_power_state(pdev, PCI_D3hot); 13251 } 13252 13253 bnx2x_disable_pcie_error_reporting(bp); 13254 if (remove_netdev) { 13255 if (bp->regview) 13256 iounmap(bp->regview); 13257 13258 /* For vfs, doorbells are part of the regview and were unmapped 13259 * along with it. FW is only loaded by PF. 
13260 */ 13261 if (IS_PF(bp)) { 13262 if (bp->doorbells) 13263 iounmap(bp->doorbells); 13264 13265 bnx2x_release_firmware(bp); 13266 } else { 13267 bnx2x_vf_pci_dealloc(bp); 13268 } 13269 bnx2x_free_mem_bp(bp); 13270 13271 free_netdev(dev); 13272 13273 if (atomic_read(&pdev->enable_cnt) == 1) 13274 pci_release_regions(pdev); 13275 13276 pci_disable_device(pdev); 13277 } 13278 } 13279 13280 static void bnx2x_remove_one(struct pci_dev *pdev) 13281 { 13282 struct net_device *dev = pci_get_drvdata(pdev); 13283 struct bnx2x *bp; 13284 13285 if (!dev) { 13286 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 13287 return; 13288 } 13289 bp = netdev_priv(dev); 13290 13291 __bnx2x_remove(pdev, dev, bp, true); 13292 } 13293 13294 static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 13295 { 13296 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 13297 13298 bp->rx_mode = BNX2X_RX_MODE_NONE; 13299 13300 if (CNIC_LOADED(bp)) 13301 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 13302 13303 /* Stop Tx */ 13304 bnx2x_tx_disable(bp); 13305 /* Delete all NAPI objects */ 13306 bnx2x_del_all_napi(bp); 13307 if (CNIC_LOADED(bp)) 13308 bnx2x_del_all_napi_cnic(bp); 13309 netdev_reset_tc(bp->dev); 13310 13311 del_timer_sync(&bp->timer); 13312 cancel_delayed_work_sync(&bp->sp_task); 13313 cancel_delayed_work_sync(&bp->period_task); 13314 13315 spin_lock_bh(&bp->stats_lock); 13316 bp->stats_state = STATS_STATE_DISABLED; 13317 spin_unlock_bh(&bp->stats_lock); 13318 13319 bnx2x_save_statistics(bp); 13320 13321 netif_carrier_off(bp->dev); 13322 13323 return 0; 13324 } 13325 13326 /** 13327 * bnx2x_io_error_detected - called when PCI error is detected 13328 * @pdev: Pointer to PCI device 13329 * @state: The current pci connection state 13330 * 13331 * This function is called after a PCI bus error affecting 13332 * this device has been detected. 13333 */ 13334 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, 13335 pci_channel_state_t state) 13336 { 13337 struct net_device *dev = pci_get_drvdata(pdev); 13338 struct bnx2x *bp = netdev_priv(dev); 13339 13340 rtnl_lock(); 13341 13342 BNX2X_ERR("IO error detected\n"); 13343 13344 netif_device_detach(dev); 13345 13346 if (state == pci_channel_io_perm_failure) { 13347 rtnl_unlock(); 13348 return PCI_ERS_RESULT_DISCONNECT; 13349 } 13350 13351 if (netif_running(dev)) 13352 bnx2x_eeh_nic_unload(bp); 13353 13354 bnx2x_prev_path_mark_eeh(bp); 13355 13356 pci_disable_device(pdev); 13357 13358 rtnl_unlock(); 13359 13360 /* Request a slot reset */ 13361 return PCI_ERS_RESULT_NEED_RESET; 13362 } 13363 13364 /** 13365 * bnx2x_io_slot_reset - called after the PCI bus has been reset 13366 * @pdev: Pointer to PCI device 13367 * 13368 * Restart the card from scratch, as if from a cold-boot. 
13369 */
13370 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13371 {
13372 struct net_device *dev = pci_get_drvdata(pdev);
13373 struct bnx2x *bp = netdev_priv(dev);
13374 int i;
13375 
13376 rtnl_lock();
13377 BNX2X_ERR("IO slot reset initializing...\n");
13378 if (pci_enable_device(pdev)) {
13379 dev_err(&pdev->dev,
13380 "Cannot re-enable PCI device after reset\n");
13381 rtnl_unlock();
13382 return PCI_ERS_RESULT_DISCONNECT;
13383 }
13384 
13385 pci_set_master(pdev);
13386 pci_restore_state(pdev);
13387 pci_save_state(pdev);
13388 
13389 if (netif_running(dev))
13390 bnx2x_set_power_state(bp, PCI_D0);
13391 
13392 if (netif_running(dev)) {
13393 BNX2X_ERR("IO slot reset --> driver unload\n");
13394 
13395 /* MCP should have been reset; need to wait for validity */
13396 bnx2x_init_shmem(bp);
13397 
13398 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
13399 u32 v;
13400 
13401 v = SHMEM2_RD(bp,
13402 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
13403 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
13404 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
13405 }
13406 bnx2x_drain_tx_queues(bp);
13407 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
13408 bnx2x_netif_stop(bp, 1);
13409 bnx2x_free_irq(bp);
13410 
13411 /* Report UNLOAD_DONE to MCP */
13412 bnx2x_send_unload_done(bp, true);
13413 
13414 bp->sp_state = 0;
13415 bp->port.pmf = 0;
13416 
13417 bnx2x_prev_unload(bp);
13418 
13419 /* We should have reset the engine, so it's fair to
13420 * assume the FW will no longer write to the bnx2x driver.
13421 */
13422 bnx2x_squeeze_objects(bp);
13423 bnx2x_free_skbs(bp);
13424 for_each_rx_queue(bp, i)
13425 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13426 bnx2x_free_fp_mem(bp);
13427 bnx2x_free_mem(bp);
13428 
13429 bp->state = BNX2X_STATE_CLOSED;
13430 }
13431 
13432 rtnl_unlock();
13433 
13434 /* If AER, perform cleanup of the PCIe registers */
13435 if (bp->flags & AER_ENABLED) {
13436 if (pci_cleanup_aer_uncorrect_error_status(pdev))
13437 BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
13438 else
13439 DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
13440 }
13441 
13442 return PCI_ERS_RESULT_RECOVERED;
13443 }
13444 
13445 /**
13446 * bnx2x_io_resume - called when traffic can start flowing again
13447 * @pdev: Pointer to PCI device
13448 *
13449 * This callback is called when the error recovery driver tells us that
13450 * it's OK to resume normal operation.
13451 */
13452 static void bnx2x_io_resume(struct pci_dev *pdev)
13453 {
13454 struct net_device *dev = pci_get_drvdata(pdev);
13455 struct bnx2x *bp = netdev_priv(dev);
13456 
13457 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13458 netdev_err(bp->dev, "Handling parity error recovery.
Try again later\n"); 13459 return; 13460 } 13461 13462 rtnl_lock(); 13463 13464 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 13465 DRV_MSG_SEQ_NUMBER_MASK; 13466 13467 if (netif_running(dev)) 13468 bnx2x_nic_load(bp, LOAD_NORMAL); 13469 13470 netif_device_attach(dev); 13471 13472 rtnl_unlock(); 13473 } 13474 13475 static const struct pci_error_handlers bnx2x_err_handler = { 13476 .error_detected = bnx2x_io_error_detected, 13477 .slot_reset = bnx2x_io_slot_reset, 13478 .resume = bnx2x_io_resume, 13479 }; 13480 13481 static void bnx2x_shutdown(struct pci_dev *pdev) 13482 { 13483 struct net_device *dev = pci_get_drvdata(pdev); 13484 struct bnx2x *bp; 13485 13486 if (!dev) 13487 return; 13488 13489 bp = netdev_priv(dev); 13490 if (!bp) 13491 return; 13492 13493 rtnl_lock(); 13494 netif_device_detach(dev); 13495 rtnl_unlock(); 13496 13497 /* Don't remove the netdevice, as there are scenarios which will cause 13498 * the kernel to hang, e.g., when trying to remove bnx2i while the 13499 * rootfs is mounted from SAN. 13500 */ 13501 __bnx2x_remove(pdev, dev, bp, false); 13502 } 13503 13504 static struct pci_driver bnx2x_pci_driver = { 13505 .name = DRV_MODULE_NAME, 13506 .id_table = bnx2x_pci_tbl, 13507 .probe = bnx2x_init_one, 13508 .remove = bnx2x_remove_one, 13509 .suspend = bnx2x_suspend, 13510 .resume = bnx2x_resume, 13511 .err_handler = &bnx2x_err_handler, 13512 #ifdef CONFIG_BNX2X_SRIOV 13513 .sriov_configure = bnx2x_sriov_configure, 13514 #endif 13515 .shutdown = bnx2x_shutdown, 13516 }; 13517 13518 static int __init bnx2x_init(void) 13519 { 13520 int ret; 13521 13522 pr_info("%s", version); 13523 13524 bnx2x_wq = create_singlethread_workqueue("bnx2x"); 13525 if (bnx2x_wq == NULL) { 13526 pr_err("Cannot create workqueue\n"); 13527 return -ENOMEM; 13528 } 13529 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); 13530 if (!bnx2x_iov_wq) { 13531 pr_err("Cannot create iov workqueue\n"); 13532 destroy_workqueue(bnx2x_wq); 13533 return -ENOMEM; 13534 } 13535 13536 ret = pci_register_driver(&bnx2x_pci_driver); 13537 if (ret) { 13538 pr_err("Cannot register driver\n"); 13539 destroy_workqueue(bnx2x_wq); 13540 destroy_workqueue(bnx2x_iov_wq); 13541 } 13542 return ret; 13543 } 13544 13545 static void __exit bnx2x_cleanup(void) 13546 { 13547 struct list_head *pos, *q; 13548 13549 pci_unregister_driver(&bnx2x_pci_driver); 13550 13551 destroy_workqueue(bnx2x_wq); 13552 destroy_workqueue(bnx2x_iov_wq); 13553 13554 /* Free globally allocated resources */ 13555 list_for_each_safe(pos, q, &bnx2x_prev_list) { 13556 struct bnx2x_prev_path_list *tmp = 13557 list_entry(pos, struct bnx2x_prev_path_list, list); 13558 list_del(pos); 13559 kfree(tmp); 13560 } 13561 } 13562 13563 void bnx2x_notify_link_changed(struct bnx2x *bp) 13564 { 13565 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); 13566 } 13567 13568 module_init(bnx2x_init); 13569 module_exit(bnx2x_cleanup); 13570 13571 /** 13572 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). 13573 * 13574 * @bp: driver handle 13575 * @set: set or clear the CAM entry 13576 * 13577 * This function will wait until the ramrod completion returns. 13578 * Return 0 if success, -ENODEV if ramrod doesn't return. 
13579 */
13580 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
13581 {
13582 unsigned long ramrod_flags = 0;
13583 
13584 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13585 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
13586 &bp->iscsi_l2_mac_obj, true,
13587 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
13588 }
13589 
13590 /* count denotes the number of new completions we have seen */
13591 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13592 {
13593 struct eth_spe *spe;
13594 int cxt_index, cxt_offset;
13595 
13596 #ifdef BNX2X_STOP_ON_ERROR
13597 if (unlikely(bp->panic))
13598 return;
13599 #endif
13600 
13601 spin_lock_bh(&bp->spq_lock);
13602 BUG_ON(bp->cnic_spq_pending < count);
13603 bp->cnic_spq_pending -= count;
13604 
13605 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
13606 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
13607 & SPE_HDR_CONN_TYPE) >>
13608 SPE_HDR_CONN_TYPE_SHIFT;
13609 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
13610 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
13611 
13612 /* Set validation for iSCSI L2 client before sending SETUP
13613 * ramrod
13614 */
13615 if (type == ETH_CONNECTION_TYPE) {
13616 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
13617 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
13618 ILT_PAGE_CIDS;
13619 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
13620 (cxt_index * ILT_PAGE_CIDS);
13621 bnx2x_set_ctx_validation(bp,
13622 &bp->context[cxt_index].
13623 vcxt[cxt_offset].eth,
13624 BNX2X_ISCSI_ETH_CID(bp));
13625 }
13626 }
13627 
13628 /*
13629 * There may be no more than 8 L2 and no more than 8 L5 SPEs
13630 * in the air. We also check that the number of outstanding
13631 * COMMON ramrods is not more than the EQ and SPQ can
13632 * accommodate.
13633 */
13634 if (type == ETH_CONNECTION_TYPE) {
13635 if (!atomic_read(&bp->cq_spq_left))
13636 break;
13637 else
13638 atomic_dec(&bp->cq_spq_left);
13639 } else if (type == NONE_CONNECTION_TYPE) {
13640 if (!atomic_read(&bp->eq_spq_left))
13641 break;
13642 else
13643 atomic_dec(&bp->eq_spq_left);
13644 } else if ((type == ISCSI_CONNECTION_TYPE) ||
13645 (type == FCOE_CONNECTION_TYPE)) {
13646 if (bp->cnic_spq_pending >=
13647 bp->cnic_eth_dev.max_kwqe_pending)
13648 break;
13649 else
13650 bp->cnic_spq_pending++;
13651 } else {
13652 BNX2X_ERR("Unknown SPE type: %d\n", type);
13653 bnx2x_panic();
13654 break;
13655 }
13656 
13657 spe = bnx2x_sp_get_next(bp);
13658 *spe = *bp->cnic_kwq_cons;
13659 
13660 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
13661 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13662 
13663 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13664 bp->cnic_kwq_cons = bp->cnic_kwq;
13665 else
13666 bp->cnic_kwq_cons++;
13667 }
13668 bnx2x_sp_prod_update(bp);
13669 spin_unlock_bh(&bp->spq_lock);
13670 }
13671 
13672 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13673 struct kwqe_16 *kwqes[], u32 count)
13674 {
13675 struct bnx2x *bp = netdev_priv(dev);
13676 int i;
13677 
13678 #ifdef BNX2X_STOP_ON_ERROR
13679 if (unlikely(bp->panic)) {
13680 BNX2X_ERR("Can't post to SP queue while panic\n");
13681 return -EIO;
13682 }
13683 #endif
13684 
13685 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
13686 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
13687 BNX2X_ERR("Handling parity error recovery.
Try again later\n"); 13688 return -EAGAIN; 13689 } 13690 13691 spin_lock_bh(&bp->spq_lock); 13692 13693 for (i = 0; i < count; i++) { 13694 struct eth_spe *spe = (struct eth_spe *)kwqes[i]; 13695 13696 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) 13697 break; 13698 13699 *bp->cnic_kwq_prod = *spe; 13700 13701 bp->cnic_kwq_pending++; 13702 13703 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", 13704 spe->hdr.conn_and_cmd_data, spe->hdr.type, 13705 spe->data.update_data_addr.hi, 13706 spe->data.update_data_addr.lo, 13707 bp->cnic_kwq_pending); 13708 13709 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) 13710 bp->cnic_kwq_prod = bp->cnic_kwq; 13711 else 13712 bp->cnic_kwq_prod++; 13713 } 13714 13715 spin_unlock_bh(&bp->spq_lock); 13716 13717 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) 13718 bnx2x_cnic_sp_post(bp, 0); 13719 13720 return i; 13721 } 13722 13723 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) 13724 { 13725 struct cnic_ops *c_ops; 13726 int rc = 0; 13727 13728 mutex_lock(&bp->cnic_mutex); 13729 c_ops = rcu_dereference_protected(bp->cnic_ops, 13730 lockdep_is_held(&bp->cnic_mutex)); 13731 if (c_ops) 13732 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 13733 mutex_unlock(&bp->cnic_mutex); 13734 13735 return rc; 13736 } 13737 13738 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) 13739 { 13740 struct cnic_ops *c_ops; 13741 int rc = 0; 13742 13743 rcu_read_lock(); 13744 c_ops = rcu_dereference(bp->cnic_ops); 13745 if (c_ops) 13746 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 13747 rcu_read_unlock(); 13748 13749 return rc; 13750 } 13751 13752 /* 13753 * for commands that have no data 13754 */ 13755 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) 13756 { 13757 struct cnic_ctl_info ctl = {0}; 13758 13759 ctl.cmd = cmd; 13760 13761 return bnx2x_cnic_ctl_send(bp, &ctl); 13762 } 13763 13764 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) 13765 { 13766 struct cnic_ctl_info ctl = {0}; 13767 13768 /* first we tell CNIC and only then we count this as a completion */ 13769 ctl.cmd = CNIC_CTL_COMPLETION_CMD; 13770 ctl.data.comp.cid = cid; 13771 ctl.data.comp.error = err; 13772 13773 bnx2x_cnic_ctl_send_bh(bp, &ctl); 13774 bnx2x_cnic_sp_post(bp, 0); 13775 } 13776 13777 /* Called with netif_addr_lock_bh() taken. 13778 * Sets an rx_mode config for an iSCSI ETH client. 13779 * Doesn't block. 13780 * Completion should be checked outside. 13781 */ 13782 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) 13783 { 13784 unsigned long accept_flags = 0, ramrod_flags = 0; 13785 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 13786 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; 13787 13788 if (start) { 13789 /* Start accepting on iSCSI L2 ring. Accept all multicasts 13790 * because it's the only way for UIO Queue to accept 13791 * multicasts (in non-promiscuous mode only one Queue per 13792 * function will receive multicast packets (leading in our 13793 * case). 
13794 */ 13795 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); 13796 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); 13797 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); 13798 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 13799 13800 /* Clear STOP_PENDING bit if START is requested */ 13801 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); 13802 13803 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; 13804 } else 13805 /* Clear START_PENDING bit if STOP is requested */ 13806 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); 13807 13808 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 13809 set_bit(sched_state, &bp->sp_state); 13810 else { 13811 __set_bit(RAMROD_RX, &ramrod_flags); 13812 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, 13813 ramrod_flags); 13814 } 13815 } 13816 13817 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 13818 { 13819 struct bnx2x *bp = netdev_priv(dev); 13820 int rc = 0; 13821 13822 switch (ctl->cmd) { 13823 case DRV_CTL_CTXTBL_WR_CMD: { 13824 u32 index = ctl->data.io.offset; 13825 dma_addr_t addr = ctl->data.io.dma_addr; 13826 13827 bnx2x_ilt_wr(bp, index, addr); 13828 break; 13829 } 13830 13831 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { 13832 int count = ctl->data.credit.credit_count; 13833 13834 bnx2x_cnic_sp_post(bp, count); 13835 break; 13836 } 13837 13838 /* rtnl_lock is held. */ 13839 case DRV_CTL_START_L2_CMD: { 13840 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13841 unsigned long sp_bits = 0; 13842 13843 /* Configure the iSCSI classification object */ 13844 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, 13845 cp->iscsi_l2_client_id, 13846 cp->iscsi_l2_cid, BP_FUNC(bp), 13847 bnx2x_sp(bp, mac_rdata), 13848 bnx2x_sp_mapping(bp, mac_rdata), 13849 BNX2X_FILTER_MAC_PENDING, 13850 &bp->sp_state, BNX2X_OBJ_TYPE_RX, 13851 &bp->macs_pool); 13852 13853 /* Set iSCSI MAC address */ 13854 rc = bnx2x_set_iscsi_eth_mac_addr(bp); 13855 if (rc) 13856 break; 13857 13858 mmiowb(); 13859 barrier(); 13860 13861 /* Start accepting on iSCSI L2 ring */ 13862 13863 netif_addr_lock_bh(dev); 13864 bnx2x_set_iscsi_eth_rx_mode(bp, true); 13865 netif_addr_unlock_bh(dev); 13866 13867 /* bits to wait on */ 13868 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 13869 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); 13870 13871 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 13872 BNX2X_ERR("rx_mode completion timed out!\n"); 13873 13874 break; 13875 } 13876 13877 /* rtnl_lock is held. 
*/ 13878 case DRV_CTL_STOP_L2_CMD: { 13879 unsigned long sp_bits = 0; 13880 13881 /* Stop accepting on iSCSI L2 ring */ 13882 netif_addr_lock_bh(dev); 13883 bnx2x_set_iscsi_eth_rx_mode(bp, false); 13884 netif_addr_unlock_bh(dev); 13885 13886 /* bits to wait on */ 13887 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 13888 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); 13889 13890 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 13891 BNX2X_ERR("rx_mode completion timed out!\n"); 13892 13893 mmiowb(); 13894 barrier(); 13895 13896 /* Unset iSCSI L2 MAC */ 13897 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, 13898 BNX2X_ISCSI_ETH_MAC, true); 13899 break; 13900 } 13901 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { 13902 int count = ctl->data.credit.credit_count; 13903 13904 smp_mb__before_atomic(); 13905 atomic_add(count, &bp->cq_spq_left); 13906 smp_mb__after_atomic(); 13907 break; 13908 } 13909 case DRV_CTL_ULP_REGISTER_CMD: { 13910 int ulp_type = ctl->data.register_data.ulp_type; 13911 13912 if (CHIP_IS_E3(bp)) { 13913 int idx = BP_FW_MB_IDX(bp); 13914 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 13915 int path = BP_PATH(bp); 13916 int port = BP_PORT(bp); 13917 int i; 13918 u32 scratch_offset; 13919 u32 *host_addr; 13920 13921 /* first write capability to shmem2 */ 13922 if (ulp_type == CNIC_ULP_ISCSI) 13923 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 13924 else if (ulp_type == CNIC_ULP_FCOE) 13925 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 13926 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 13927 13928 if ((ulp_type != CNIC_ULP_FCOE) || 13929 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || 13930 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) 13931 break; 13932 13933 /* if reached here - should write fcoe capabilities */ 13934 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); 13935 if (!scratch_offset) 13936 break; 13937 scratch_offset += offsetof(struct glob_ncsi_oem_data, 13938 fcoe_features[path][port]); 13939 host_addr = (u32 *) &(ctl->data.register_data. 
13940 fcoe_features); 13941 for (i = 0; i < sizeof(struct fcoe_capabilities); 13942 i += 4) 13943 REG_WR(bp, scratch_offset + i, 13944 *(host_addr + i/4)); 13945 } 13946 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 13947 break; 13948 } 13949 13950 case DRV_CTL_ULP_UNREGISTER_CMD: { 13951 int ulp_type = ctl->data.ulp_type; 13952 13953 if (CHIP_IS_E3(bp)) { 13954 int idx = BP_FW_MB_IDX(bp); 13955 u32 cap; 13956 13957 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 13958 if (ulp_type == CNIC_ULP_ISCSI) 13959 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 13960 else if (ulp_type == CNIC_ULP_FCOE) 13961 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 13962 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 13963 } 13964 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 13965 break; 13966 } 13967 13968 default: 13969 BNX2X_ERR("unknown command %x\n", ctl->cmd); 13970 rc = -EINVAL; 13971 } 13972 13973 return rc; 13974 } 13975 13976 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) 13977 { 13978 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13979 13980 if (bp->flags & USING_MSIX_FLAG) { 13981 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; 13982 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; 13983 cp->irq_arr[0].vector = bp->msix_table[1].vector; 13984 } else { 13985 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 13986 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 13987 } 13988 if (!CHIP_IS_E1x(bp)) 13989 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; 13990 else 13991 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; 13992 13993 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); 13994 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); 13995 cp->irq_arr[1].status_blk = bp->def_status_blk; 13996 cp->irq_arr[1].status_blk_num = DEF_SB_ID; 13997 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; 13998 13999 cp->num_irq = 2; 14000 } 14001 14002 void bnx2x_setup_cnic_info(struct bnx2x *bp) 14003 { 14004 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14005 14006 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 14007 bnx2x_cid_ilt_lines(bp); 14008 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 14009 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 14010 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 14011 14012 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", 14013 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, 14014 cp->iscsi_l2_cid); 14015 14016 if (NO_ISCSI_OOO(bp)) 14017 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 14018 } 14019 14020 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, 14021 void *data) 14022 { 14023 struct bnx2x *bp = netdev_priv(dev); 14024 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14025 int rc; 14026 14027 DP(NETIF_MSG_IFUP, "Register_cnic called\n"); 14028 14029 if (ops == NULL) { 14030 BNX2X_ERR("NULL ops received\n"); 14031 return -EINVAL; 14032 } 14033 14034 if (!CNIC_SUPPORT(bp)) { 14035 BNX2X_ERR("Can't register CNIC when not supported\n"); 14036 return -EOPNOTSUPP; 14037 } 14038 14039 if (!CNIC_LOADED(bp)) { 14040 rc = bnx2x_load_cnic(bp); 14041 if (rc) { 14042 BNX2X_ERR("CNIC-related load failed\n"); 14043 return rc; 14044 } 14045 } 14046 14047 bp->cnic_enabled = true; 14048 14049 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); 14050 if (!bp->cnic_kwq) 14051 return -ENOMEM; 14052 14053 bp->cnic_kwq_cons = bp->cnic_kwq; 14054 bp->cnic_kwq_prod = bp->cnic_kwq; 14055 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; 
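/* The CNIC kwqe ring starts out empty: the consumer and producer both point
 * at the base of the page allocated above, and cnic_kwq_last marks the wrap
 * point - once either pointer reaches it, bnx2x_cnic_sp_queue() and
 * bnx2x_cnic_sp_post() reset that pointer back to cnic_kwq.
 */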
14056 14057 bp->cnic_spq_pending = 0; 14058 bp->cnic_kwq_pending = 0; 14059 14060 bp->cnic_data = data; 14061 14062 cp->num_irq = 0; 14063 cp->drv_state |= CNIC_DRV_STATE_REGD; 14064 cp->iro_arr = bp->iro_arr; 14065 14066 bnx2x_setup_cnic_irq_info(bp); 14067 14068 rcu_assign_pointer(bp->cnic_ops, ops); 14069 14070 /* Schedule driver to read CNIC driver versions */ 14071 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 14072 14073 return 0; 14074 } 14075 14076 static int bnx2x_unregister_cnic(struct net_device *dev) 14077 { 14078 struct bnx2x *bp = netdev_priv(dev); 14079 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14080 14081 mutex_lock(&bp->cnic_mutex); 14082 cp->drv_state = 0; 14083 RCU_INIT_POINTER(bp->cnic_ops, NULL); 14084 mutex_unlock(&bp->cnic_mutex); 14085 synchronize_rcu(); 14086 bp->cnic_enabled = false; 14087 kfree(bp->cnic_kwq); 14088 bp->cnic_kwq = NULL; 14089 14090 return 0; 14091 } 14092 14093 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) 14094 { 14095 struct bnx2x *bp = netdev_priv(dev); 14096 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14097 14098 /* If both iSCSI and FCoE are disabled - return NULL in 14099 * order to indicate CNIC that it should not try to work 14100 * with this device. 14101 */ 14102 if (NO_ISCSI(bp) && NO_FCOE(bp)) 14103 return NULL; 14104 14105 cp->drv_owner = THIS_MODULE; 14106 cp->chip_id = CHIP_ID(bp); 14107 cp->pdev = bp->pdev; 14108 cp->io_base = bp->regview; 14109 cp->io_base2 = bp->doorbells; 14110 cp->max_kwqe_pending = 8; 14111 cp->ctx_blk_size = CDU_ILT_PAGE_SZ; 14112 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 14113 bnx2x_cid_ilt_lines(bp); 14114 cp->ctx_tbl_len = CNIC_ILT_LINES; 14115 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 14116 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; 14117 cp->drv_ctl = bnx2x_drv_ctl; 14118 cp->drv_register_cnic = bnx2x_register_cnic; 14119 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 14120 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 14121 cp->iscsi_l2_client_id = 14122 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 14123 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 14124 14125 if (NO_ISCSI_OOO(bp)) 14126 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 14127 14128 if (NO_ISCSI(bp)) 14129 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; 14130 14131 if (NO_FCOE(bp)) 14132 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; 14133 14134 BNX2X_DEV_INFO( 14135 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n", 14136 cp->ctx_blk_size, 14137 cp->ctx_tbl_offset, 14138 cp->ctx_tbl_len, 14139 cp->starting_cid); 14140 return cp; 14141 } 14142 14143 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) 14144 { 14145 struct bnx2x *bp = fp->bp; 14146 u32 offset = BAR_USTRORM_INTMEM; 14147 14148 if (IS_VF(bp)) 14149 return bnx2x_vf_ustorm_prods_offset(bp, fp); 14150 else if (!CHIP_IS_E1x(bp)) 14151 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 14152 else 14153 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); 14154 14155 return offset; 14156 } 14157 14158 /* called only on E1H or E2. 
14159 * When pretending to be PF, the pretend value is the function number 0...7
14160 * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
14161 * combination
14162 */
14163 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
14164 {
14165 u32 pretend_reg;
14166 
14167 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
14168 return -1;
14169 
14170 /* get my own pretend register */
14171 pretend_reg = bnx2x_get_pretend_reg(bp);
14172 REG_WR(bp, pretend_reg, pretend_func_val);
14173 REG_RD(bp, pretend_reg);
14174 return 0;
14175 }
14176 
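/* Illustrative usage sketch only, not part of the driver itself: a caller
 * that needs to touch registers on behalf of a VF first pretends to be that
 * VF and then restores its own function number once it is done. The helper
 * macros named below are the ones the SR-IOV code paths use for this and are
 * shown purely as an example:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *	... REG_RD()/REG_WR() accesses issued on behalf of the VF ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */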