/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);


int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

#define INT_MODE_INTx		1
#define INT_MODE_MSI		2
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");


struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57800,
	BCM57800_MF,
	BCM57810,
	BCM57810_MF,
	BCM57840,
	BCM57840_MF,
	BCM57811,
	BCM57811_MF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840
#define PCI_DEVICE_ID_NX2_57840		CHIP_NUM_57840
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/****************************************************************************
* General service functions
****************************************************************************/

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"


/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

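/* Build a DMAE command opcode.  The opcode word encodes the source and
 * destination types (PCI or GRC), the SRC/DST reset bits, the issuing
 * port and VN, the error-handling policy and the host endianity;
 * bnx2x_dmae_opcode_add_comp() optionally folds in the completion
 * destination on top of that.
 */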
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	/*
	 * Lock the dmae channel. Disable BHs to prevent a dead-lock,
	 * since this code is called both from syscall context and
	 * from the ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

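/* Illustrative usage (not from the original source): copying two dwords
 * from a DMA-coherent host buffer at 'mapping' into GRC address 'dst'
 * and waiting for the completion would be:
 *
 *	bnx2x_write_dmae(bp, mapping, dst, 2);
 *
 * Note that len32 counts 32-bit words, not bytes, and that before
 * bp->dmae_ready is set the call falls back to indirect/string register
 * writes.
 */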
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

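/* Scan the STORM assert lists.  Each list entry occupies four
 * consecutive dwords (row0..row3); scanning stops at the first entry
 * whose first dword is COMMON_ASM_INVALID_ASSERT_OPCODE.  The return
 * value is the total number of asserts found across the X/T/C/U STORMs.
 */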
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x800;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x)  next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			txdata = fp->txdata[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
					   char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

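/* The default poll budget works out to FLR_WAIT_USEC / FLR_WAIT_INTERVAL
 * = 10000 / 50 = 200 iterations of 50 usec each.  Emulation and FPGA
 * platforms run much slower, so the budget is scaled up for them below.
 */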
static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)


static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
				  u32 poll_cnt)
{
	struct sdm_op_gen op_gen = {0};

	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		ret = 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	int pos;
	u16 status;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return false;

	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for the DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

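/* Same enable sequence for chips whose interrupt controller is the IGU
 * block rather than the HC block; bnx2x_int_enable() below selects the
 * variant according to bp->common.int_block.
 */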
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability; it is forbidden to disable
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the
		 * mask register to prevent the HC from sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to.  Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp: driver handle
 *
 * Tries to acquire a leader lock for the current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* If the Q update ramrod was completed for the last Q in the
		 * AFEX VIF set flow, ACK MCP at the end.
		 *
		 * Mark the pending ACK to MCP bit, and prevent the case where
		 * both bits are cleared.  At the end of load/unload the
		 * driver checks that sp_state is cleared, and this order
		 * prevents races.
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule workqueue to send ack to MCP */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}

	return;
}

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
{
	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;

	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
				 start);
}

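/* Legacy (single-vector) interrupt handler.  In the status word returned
 * by bnx2x_ack_int(), bit 0 indicates the slowpath (default) status
 * block and each fastpath status block owns one of the bits above it -
 * hence the "0x2 << (fp->index + CNIC_PRESENT)" masks below.
 */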
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_PRESENT);
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata[cos].tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		if (likely(bp->state == BNX2X_STATE_OPEN)) {
			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops)
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();
		}

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

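/* GPIO accessors.  Pins are banked per port: when the NIG port-swap
 * strap is set and active, the pin of the other port is addressed, so
 * the bit actually tested or driven is shifted by
 * MISC_REGISTERS_GPIO_PORT_SHIFT.  For example (illustrative), reading
 * GPIO 2 of a swapped port tests bit
 * (2 + MISC_REGISTERS_GPIO_PORT_SHIFT) of MISC_REG_GPIO.
 */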
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

 */

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and clear the FLOAT/CLR/SET bits for the given pins */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	if (rc == 0)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;
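/* Editor's note: a hypothetical caller sketch for bnx2x_set_mult_gpio()
 * above. The "pins" argument is a raw bitmask of GPIO numbers (any port
 * swapping is the caller's job), so driving GPIO0 and GPIO1 low together
 * would look like:
 *
 *	u8 pins = (1 << MISC_REGISTERS_GPIO_0) |
 *		  (1 << MISC_REGISTERS_GPIO_1);
 *	bnx2x_set_mult_gpio(bp, pins, MISC_REGISTERS_GPIO_OUTPUT_LOW);
 */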
case MISC_REGISTERS_SPIO_OUTPUT_HIGH: 2046 DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num); 2047 /* clear FLOAT and set SET */ 2048 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2049 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); 2050 break; 2051 2052 case MISC_REGISTERS_SPIO_INPUT_HI_Z: 2053 DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num); 2054 /* set FLOAT */ 2055 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2056 break; 2057 2058 default: 2059 break; 2060 } 2061 2062 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2063 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2064 2065 return 0; 2066 } 2067 2068 void bnx2x_calc_fc_adv(struct bnx2x *bp) 2069 { 2070 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2071 switch (bp->link_vars.ieee_fc & 2072 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2073 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 2074 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2075 ADVERTISED_Pause); 2076 break; 2077 2078 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2079 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2080 ADVERTISED_Pause); 2081 break; 2082 2083 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2084 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2085 break; 2086 2087 default: 2088 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2089 ADVERTISED_Pause); 2090 break; 2091 } 2092 } 2093 2094 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2095 { 2096 if (!BP_NOMCP(bp)) { 2097 u8 rc; 2098 int cfx_idx = bnx2x_get_link_cfg_idx(bp); 2099 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2100 /* 2101 * Initialize link parameters structure variables 2102 * It is recommended to turn off RX FC for jumbo frames 2103 * for better performance 2104 */ 2105 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2106 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2107 else 2108 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2109 2110 bnx2x_acquire_phy_lock(bp); 2111 2112 if (load_mode == LOAD_DIAG) { 2113 struct link_params *lp = &bp->link_params; 2114 lp->loopback_mode = LOOPBACK_XGXS; 2115 /* do PHY loopback at 10G speed, if possible */ 2116 if (lp->req_line_speed[cfx_idx] < SPEED_10000) { 2117 if (lp->speed_cap_mask[cfx_idx] & 2118 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2119 lp->req_line_speed[cfx_idx] = 2120 SPEED_10000; 2121 else 2122 lp->req_line_speed[cfx_idx] = 2123 SPEED_1000; 2124 } 2125 } 2126 2127 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2128 2129 bnx2x_release_phy_lock(bp); 2130 2131 bnx2x_calc_fc_adv(bp); 2132 2133 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { 2134 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2135 bnx2x_link_report(bp); 2136 } else 2137 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2138 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2139 return rc; 2140 } 2141 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2142 return -EINVAL; 2143 } 2144 2145 void bnx2x_link_set(struct bnx2x *bp) 2146 { 2147 if (!BP_NOMCP(bp)) { 2148 bnx2x_acquire_phy_lock(bp); 2149 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2150 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2151 bnx2x_release_phy_lock(bp); 2152 2153 bnx2x_calc_fc_adv(bp); 2154 } else 2155 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2156 } 2157 2158 static void bnx2x__link_reset(struct bnx2x *bp) 2159 { 2160 if (!BP_NOMCP(bp)) { 2161 bnx2x_acquire_phy_lock(bp); 2162 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2163 
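/* Editor's note: summary of the pause-advertisement mapping implemented
 * by bnx2x_calc_fc_adv() above (ieee_fc field -> ethtool advertising):
 *
 *	PAUSE_NONE       -> clear ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	PAUSE_BOTH       -> set   ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	PAUSE_ASYMMETRIC -> set   ADVERTISED_Asym_Pause only
 */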
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}


/* Calculates the per-VN min rates, needed for further normalizing of the
 * min_rates, and stores them in the cmng input structure.
 * If all the min_rates are 0, the fairness algorithm is deactivated.
 * If not all min_rates are zero, those that are zero are raised to
 * DEF_MIN_RATE.
 */
static void bnx2x_calc_vn_min(struct bnx2x *bp,
			      struct cmng_init_input *input)
{
	int all_zero = 1;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
		else if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		input->vnic_min_rate[vn] = vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes, fairness will be disabled\n");
	} else
		input->flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
			      struct cmng_init_input *input)
{
	u16 vn_max_rate;
	u32 vn_cfg = bp->mf_config[vn];

	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
		vn_max_rate = 0;
	else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		if (IS_MF_SI(bp)) {
			/* maxCfg in percent of link speed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else /* SD modes */
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

	input->vnic_max_rate[vn] = vn_max_rate;
}


static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
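/* Editor's note: a worked example (values are illustrative) for the
 * min/max rate helpers above. With a 10000 Mbps link and an mf_config
 * MIN_BW field of 25, bnx2x_calc_vn_min() stores
 *	vn_min_rate = 25 * 100 = 2500
 * and in MF_SI mode a max_cfg of 30 (percent of link speed) gives
 *	vn_max_rate = (10000 * 30) / 100 = 3000
 * while in SD modes max_cfg is absolute in 100 Mb units, so 30 -> 3000.
 */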
	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case? */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 * and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 * and there are 2 functions per port
	 */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	} else {
		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
		bp->flags &= ~MF_FUNC_DIS;
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	struct cmng_init_input input;
	memset(&input, 0, sizeof(struct cmng_init_input));

	input.port_rate = bp->link_vars.line_speed;

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* calculate vn min rates (vn_weight_sum) and enable
		 * fairness if not all of them are zero
		 */
		bnx2x_calc_vn_min(bp, &input);

		/* calculate and set min-max rate for each vn */
		if (bp->port.pmf)
			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
				bnx2x_calc_vn_max(bp, vn, &input);

		/* always enable rate shaping and fairness */
		input.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

		bnx2x_init_cmng(&input, &bp->cmng);
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static void storm_memset_cmng(struct bnx2x *bp,
			      struct cmng_init *cmng,
			      u8 port)
{
	int vn;
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int func = func_by_vn(bp, vn);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct rate_shaping_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct fairness_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}
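/* Editor's note: a worked example for the absolute-function formula in
 * bnx2x_read_mf_cfg() above. In a 2-port configuration (n == 1), vn 2 on
 * port 1, path 0 resolves to
 *	abs_func = 1 * (2 * 2 + 1) + 0 = 5
 * and in a 4-port configuration (n == 2), vn 1 on port 1, path 0 gives
 *	abs_func = 2 * (2 * 1 + 1) + 0 = 6,
 * matching the 4*vn + 2*port + path form quoted in the comment there.
 */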
		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	/* read updated dcb configuration */
	bnx2x_dcbx_pmf_update(bp);

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
				  u16 vlan_val, u8 allowed_prio)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_afex_update_params *f_update_params =
		&func_params.params.afex_update;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */

	f_update_params->vif_id = vifid;
	f_update_params->afex_default_vlan = vlan_val;
	f_update_params->allowed_priorities = allowed_prio;

	/* if the ramrod can not be sent, respond to the MCP immediately */
	if (bnx2x_func_state_change(bp, &func_params) < 0)
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);

	return 0;
}

static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
					  u16 vif_index, u8 func_bit_map)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_afex_viflists_params *update_params =
		&func_params.params.afex_viflists;
	int rc;
	u32 drv_msg_code;

	/* validate only LIST_SET and LIST_GET are received from switch */
	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
			  cmd_type);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;

	/* set parameters according to cmd_type */
	update_params->afex_vif_list_command = cmd_type;
	update_params->vif_list_index = cpu_to_le16(vif_index);
	update_params->func_bit_map =
		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
	update_params->func_to_clear = 0;
	drv_msg_code =
		(cmd_type == VIF_LIST_RULE_GET) ?
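/* Editor's note: a minimal sketch (not driver code) of the pattern used
 * by the two afex helpers above: fill a bnx2x_func_state_params with the
 * function object and command, then submit it as a ramrod; on failure the
 * MCP is acked immediately instead of waiting for completion:
 *
 *	struct bnx2x_func_state_params p = {0};
 *	p.f_obj = &bp->func_obj;
 *	p.cmd = BNX2X_F_CMD_AFEX_UPDATE;
 *	// fill p.params.afex_update ...
 *	if (bnx2x_func_state_change(bp, &p) < 0)
 *		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
 */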
2488 DRV_MSG_CODE_AFEX_LISTGET_ACK : 2489 DRV_MSG_CODE_AFEX_LISTSET_ACK; 2490 2491 /* if ramrod can not be sent, respond to MCP immediately for 2492 * SET and GET requests (other are not triggered from MCP) 2493 */ 2494 rc = bnx2x_func_state_change(bp, &func_params); 2495 if (rc < 0) 2496 bnx2x_fw_command(bp, drv_msg_code, 0); 2497 2498 return 0; 2499 } 2500 2501 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) 2502 { 2503 struct afex_stats afex_stats; 2504 u32 func = BP_ABS_FUNC(bp); 2505 u32 mf_config; 2506 u16 vlan_val; 2507 u32 vlan_prio; 2508 u16 vif_id; 2509 u8 allowed_prio; 2510 u8 vlan_mode; 2511 u32 addr_to_write, vifid, addrs, stats_type, i; 2512 2513 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { 2514 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2515 DP(BNX2X_MSG_MCP, 2516 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); 2517 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); 2518 } 2519 2520 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { 2521 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2522 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); 2523 DP(BNX2X_MSG_MCP, 2524 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", 2525 vifid, addrs); 2526 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, 2527 addrs); 2528 } 2529 2530 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { 2531 addr_to_write = SHMEM2_RD(bp, 2532 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); 2533 stats_type = SHMEM2_RD(bp, 2534 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2535 2536 DP(BNX2X_MSG_MCP, 2537 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", 2538 addr_to_write); 2539 2540 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); 2541 2542 /* write response to scratchpad, for MCP */ 2543 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) 2544 REG_WR(bp, addr_to_write + i*sizeof(u32), 2545 *(((u32 *)(&afex_stats))+i)); 2546 2547 /* send ack message to MCP */ 2548 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); 2549 } 2550 2551 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { 2552 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); 2553 bp->mf_config[BP_VN(bp)] = mf_config; 2554 DP(BNX2X_MSG_MCP, 2555 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", 2556 mf_config); 2557 2558 /* if VIF_SET is "enabled" */ 2559 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { 2560 /* set rate limit directly to internal RAM */ 2561 struct cmng_init_input cmng_input; 2562 struct rate_shaping_vars_per_vn m_rs_vn; 2563 size_t size = sizeof(struct rate_shaping_vars_per_vn); 2564 u32 addr = BAR_XSTRORM_INTMEM + 2565 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); 2566 2567 bp->mf_config[BP_VN(bp)] = mf_config; 2568 2569 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); 2570 m_rs_vn.vn_counter.rate = 2571 cmng_input.vnic_max_rate[BP_VN(bp)]; 2572 m_rs_vn.vn_counter.quota = 2573 (m_rs_vn.vn_counter.rate * 2574 RS_PERIODIC_TIMEOUT_USEC) / 8; 2575 2576 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); 2577 2578 /* read relevant values from mf_cfg struct in shmem */ 2579 vif_id = 2580 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2581 FUNC_MF_CFG_E1HOV_TAG_MASK) >> 2582 FUNC_MF_CFG_E1HOV_TAG_SHIFT; 2583 vlan_val = 2584 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2585 FUNC_MF_CFG_AFEX_VLAN_MASK) >> 2586 FUNC_MF_CFG_AFEX_VLAN_SHIFT; 2587 vlan_prio = (mf_config & 2588 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 2589 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; 2590 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); 2591 
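/* Editor's note: a worked example for the tag composition just above.
 * With VLAN_PRIO_SHIFT == 13, a priority of 5 and a VID of 100 yield
 *	vlan_val = (5 << 13) | 100 = 0xA000 | 0x064 = 0xA064,
 * i.e. the 3 priority bits sit above the 12-bit VLAN ID in 802.1Q order.
 */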
			vlan_mode =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
			allowed_prio =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;

			/* send ramrod to FW, return in case of failure */
			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
						   allowed_prio))
				return;

			bp->afex_def_vlan_tag = vlan_val;
			bp->afex_vlan_mode = vlan_mode;
		} else {
			/* notify link down because BP->flags is disabled */
			bnx2x_link_report(bp);

			/* send INVALID VIF ramrod to FW */
			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);

			/* Reset the default afex VLAN */
			bp->afex_def_vlan_tag = -1;
		}
	}
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);

	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
	smp_mb();

	/* queue a periodic task */
	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);

	bnx2x_dcbx_pmf_update(bp);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
	   (command | seq), param);

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}


static void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (CHIP_IS_E1x(bp)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common to Tx-only and regular connections.
 */
static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    bool zero_stats)
{
	unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */
	__set_bit(BNX2X_Q_FLG_STATS, &flags);
	if (zero_stats)
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);


	return flags;
}

static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp,
				       bool leading)
{
	unsigned long flags = 0;

	/* calculate other queue flags */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, &flags);

	if (IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
	}

	if (!fp->disable_tpa) {
		__set_bit(BNX2X_Q_FLG_TPA, &flags);
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
		if (fp->mode == TPA_MODE_GRO)
			__set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
	}

	if (leading) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
	}

	/* Always set HW VLAN stripping */
	__set_bit(BNX2X_Q_FLG_VLAN, &flags);

	/* configure silent vlan removal */
	if (IS_MF_AFEX(bp))
		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);


	return flags | bnx2x_get_common_flags(bp, fp, true);
}

static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
	u8 cos)
{
	gen_init->stat_id = bnx2x_stats_id(fp);
	gen_init->spcl_id = fp->cl_id;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		gen_init->mtu = bp->dev->mtu;

	gen_init->cos = cos;
}

static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_setup_params *rxq_init)
{
	u8 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	if (!fp->disable_tpa) {
		pause->sge_th_lo = SGE_TH_LO(bp);
		pause->sge_th_hi = SGE_TH_HI(bp);

		/* validate SGE ring has enough to cross high threshold */
		WARN_ON(bp->dropless_fc &&
				pause->sge_th_hi + FW_PREFETCH_CNT >
				MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);

		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_lo = BD_TH_LO(bp);
		pause->bd_th_hi = BD_TH_HI(bp);

		pause->rcq_th_lo = RCQ_TH_LO(bp);
		pause->rcq_th_hi = RCQ_TH_HI(bp);
		/*
		 * validate that rings have enough entries to cross
		 * high thresholds
		 */
		WARN_ON(bp->dropless_fc &&
				pause->bd_th_hi + FW_PREFETCH_CNT >
				bp->rx_ring_size);
		WARN_ON(bp->dropless_fc &&
				pause->rcq_th_hi + FW_PREFETCH_CNT >
				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);

		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* This should be the maximum number of data bytes that may be
	 * placed on the BD (not including padding).
	 */
	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
			   BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;

	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->rss_engine_id = BP_FUNC(bp);
	rxq_init->mcast_engine_id = BP_FUNC(bp);

	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
	if (IS_MF_AFEX(bp)) {
		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
		rxq_init->silent_removal_mask = VLAN_VID_MASK;
	}
}

static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
	u8 cos)
{
	txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;

	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);

	if (IS_FCOE_FP(fp)) {
		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
	}
}

static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* This flag is relevant for E1x only.
	 * E2 doesn't have a TPA configuration at the function level.
	 */
	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link yet, so the initial link rate is set to
	 * 10 Gbps. When the link comes up, the congestion management values
	 * are re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}


static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bnx2x_tx_disable(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
	struct eth_stats_info *ether_stat =
		&bp->slowpath->drv_info_to_mcp.ether_stat;

	/* leave last char as NULL */
	memcpy(ether_stat->version, DRV_MODULE_VERSION,
	       ETH_STAT_INFO_VERSION_LEN - 1);

	bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
					 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
					 ether_stat->mac_local);

	ether_stat->mtu_size = bp->dev->mtu;

	if (bp->dev->features & NETIF_F_RXCSUM)
		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
	if (bp->dev->features & NETIF_F_TSO)
		ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
	ether_stat->feature_flags |= bp->common.boot_mode;

	ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;

	ether_stat->txq_size = bp->tx_ring_size;
	ether_stat->rxq_size = bp->rx_ring_size;
}

static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
{
#ifdef BCM_CNIC
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct fcoe_stats_info *fcoe_stat =
		&bp->slowpath->drv_info_to_mcp.fcoe_stat;

	memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);

	fcoe_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];

	/* insert FCoE stats from ramrod response */
	if (!NO_FCOE(bp)) {
		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX].
			tstorm_queue_statistics;

		struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX].
			xstorm_queue_statistics;
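/* Editor's note: the ADD_64() calls below accumulate split 64-bit
 * counters. As defined in bnx2x_stats.h, the macro is roughly:
 *
 *	#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
 *		do { \
 *			s_lo += a_lo; \
 *			s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
 *		} while (0)
 *
 * i.e. a 64-bit add of a_hi:a_lo into s_hi:s_lo with a manual carry,
 * which is why byte and packet counts can be added in hi/lo halves
 * straight from the storm statistics.
 */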
		struct fcoe_statistics_params *fw_fcoe_stat =
			&bp->fw_stats_data->fcoe;

		ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
		       fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
		       fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->mcast_pkts_sent);
	}

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
#endif
}

static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
{
#ifdef BCM_CNIC
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct iscsi_stats_info *iscsi_stat =
		&bp->slowpath->drv_info_to_mcp.iscsi_stat;

	memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);

	iscsi_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
#endif
}

/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify other functions about the change
 */
static void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}

static void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}

static void
bnx2x_handle_drv_info_req(struct bnx2x *bp) 3180 { 3181 enum drv_info_opcode op_code; 3182 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3183 3184 /* if drv_info version supported by MFW doesn't match - send NACK */ 3185 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3186 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3187 return; 3188 } 3189 3190 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3191 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3192 3193 memset(&bp->slowpath->drv_info_to_mcp, 0, 3194 sizeof(union drv_info_to_mcp)); 3195 3196 switch (op_code) { 3197 case ETH_STATS_OPCODE: 3198 bnx2x_drv_info_ether_stat(bp); 3199 break; 3200 case FCOE_STATS_OPCODE: 3201 bnx2x_drv_info_fcoe_stat(bp); 3202 break; 3203 case ISCSI_STATS_OPCODE: 3204 bnx2x_drv_info_iscsi_stat(bp); 3205 break; 3206 default: 3207 /* if op code isn't supported - send NACK */ 3208 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3209 return; 3210 } 3211 3212 /* if we got drv_info attn from MFW then these fields are defined in 3213 * shmem2 for sure 3214 */ 3215 SHMEM2_WR(bp, drv_info_host_addr_lo, 3216 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3217 SHMEM2_WR(bp, drv_info_host_addr_hi, 3218 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3219 3220 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3221 } 3222 3223 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 3224 { 3225 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 3226 3227 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3228 3229 /* 3230 * This is the only place besides the function initialization 3231 * where the bp->flags can change so it is done without any 3232 * locks 3233 */ 3234 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3235 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3236 bp->flags |= MF_FUNC_DIS; 3237 3238 bnx2x_e1h_disable(bp); 3239 } else { 3240 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3241 bp->flags &= ~MF_FUNC_DIS; 3242 3243 bnx2x_e1h_enable(bp); 3244 } 3245 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3246 } 3247 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3248 bnx2x_config_mf_bw(bp); 3249 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3250 } 3251 3252 /* Report results to MCP */ 3253 if (dcc_event) 3254 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); 3255 else 3256 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); 3257 } 3258 3259 /* must be called under the spq lock */ 3260 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3261 { 3262 struct eth_spe *next_spe = bp->spq_prod_bd; 3263 3264 if (bp->spq_prod_bd == bp->spq_last_bd) { 3265 bp->spq_prod_bd = bp->spq; 3266 bp->spq_prod_idx = 0; 3267 DP(BNX2X_MSG_SP, "end of spq\n"); 3268 } else { 3269 bp->spq_prod_bd++; 3270 bp->spq_prod_idx++; 3271 } 3272 return next_spe; 3273 } 3274 3275 /* must be called under the spq lock */ 3276 static void bnx2x_sp_prod_update(struct bnx2x *bp) 3277 { 3278 int func = BP_FUNC(bp); 3279 3280 /* 3281 * Make sure that BD data is updated before writing the producer: 3282 * BD data is written to the memory, the producer is read from the 3283 * memory, thus we need a full memory barrier to ensure the ordering. 
	 */
	mb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}

/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
{
	if ((cmd_type == NONE_CONNECTION_TYPE) ||
	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
		return true;
	else
		return false;

}


/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int cmd_type)
{
	struct eth_spe *spe;
	u16 type;
	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post SP when there is panic\n");
		return -EIO;
	}
#endif

	spin_lock_bh(&bp->spq_lock);

	if (common) {
		if (!atomic_read(&bp->eq_spq_left)) {
			BNX2X_ERR("BUG! EQ ring full!\n");
			spin_unlock_bh(&bp->spq_lock);
			bnx2x_panic();
			return -EBUSY;
		}
	} else if (!atomic_read(&bp->cq_spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs the port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
	if (common)
		atomic_dec(&bp->eq_spq_left);
	else
		atomic_dec(&bp->cq_spq_left);

	DP(BNX2X_MSG_SP,
	   "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
	   HW_CID(bp, cid), data_hi, data_lo, type,
	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}

#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: reading the indices must complete before handling */
	barrier();
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);

			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
			if (nig_mask) {
				REG_WR(bp, nig_int_mask_addr, 0);

				bnx2x_link_attn(bp);
			}

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
			    "Please contact OEM Support for assistance\n");

	/*
	 * Schedule device reset (unload). Some boards consume enough power
	 * while the driver is up to overheat if the fan fails.
	 */
	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(&bp->sp_rtnl_task, 0);

}

static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ?
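/* Editor's note: bnx2x_fan_failure() above uses the common deferred-reset
 * pattern: a flag bit is set under memory barriers and a worker is
 * scheduled to perform the unload under rtnl. A generic sketch (names
 * are hypothetical):
 *
 *	smp_mb__before_clear_bit();
 *	set_bit(SOME_EVENT_FLAG, &state);
 *	smp_mb__after_clear_bit();
 *	schedule_delayed_work(&worker, 0);
 *
 * The barriers ensure the flag write is visible to the worker before it
 * runs on another CPU.
 */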
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 3601 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 3602 3603 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 3604 3605 val = REG_RD(bp, reg_offset); 3606 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 3607 REG_WR(bp, reg_offset, val); 3608 3609 BNX2X_ERR("SPIO5 hw attention\n"); 3610 3611 /* Fan failure attention */ 3612 bnx2x_hw_reset_phy(&bp->link_params); 3613 bnx2x_fan_failure(bp); 3614 } 3615 3616 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 3617 bnx2x_acquire_phy_lock(bp); 3618 bnx2x_handle_module_detect_int(&bp->link_params); 3619 bnx2x_release_phy_lock(bp); 3620 } 3621 3622 if (attn & HW_INTERRUT_ASSERT_SET_0) { 3623 3624 val = REG_RD(bp, reg_offset); 3625 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 3626 REG_WR(bp, reg_offset, val); 3627 3628 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 3629 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 3630 bnx2x_panic(); 3631 } 3632 } 3633 3634 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 3635 { 3636 u32 val; 3637 3638 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 3639 3640 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 3641 BNX2X_ERR("DB hw attention 0x%x\n", val); 3642 /* DORQ discard attention */ 3643 if (val & 0x2) 3644 BNX2X_ERR("FATAL error from DORQ\n"); 3645 } 3646 3647 if (attn & HW_INTERRUT_ASSERT_SET_1) { 3648 3649 int port = BP_PORT(bp); 3650 int reg_offset; 3651 3652 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 3653 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 3654 3655 val = REG_RD(bp, reg_offset); 3656 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 3657 REG_WR(bp, reg_offset, val); 3658 3659 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 3660 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 3661 bnx2x_panic(); 3662 } 3663 } 3664 3665 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 3666 { 3667 u32 val; 3668 3669 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3670 3671 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 3672 BNX2X_ERR("CFC hw attention 0x%x\n", val); 3673 /* CFC error attention */ 3674 if (val & 0x2) 3675 BNX2X_ERR("FATAL error from CFC\n"); 3676 } 3677 3678 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3679 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 3680 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 3681 /* RQ_USDMDP_FIFO_OVERFLOW */ 3682 if (val & 0x18000) 3683 BNX2X_ERR("FATAL error from PXP\n"); 3684 3685 if (!CHIP_IS_E1x(bp)) { 3686 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 3687 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 3688 } 3689 } 3690 3691 if (attn & HW_INTERRUT_ASSERT_SET_2) { 3692 3693 int port = BP_PORT(bp); 3694 int reg_offset; 3695 3696 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
			      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x_read_mf_cfg(bp);
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			if (val & DRV_STATUS_DRV_INFO_REQ)
				bnx2x_handle_drv_info_req(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
			    bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
			if (val & DRV_STATUS_AFEX_EVENT_MASK)
				bnx2x_handle_afex_cmd(bp,
					val & DRV_STATUS_AFEX_EVENT_MASK);
			if (bp->link_vars.periodic_flags &
			    PERIODIC_FLAGS_LINK_EVENT) {
				/* sync with link */
				bnx2x_acquire_phy_lock(bp);
				bp->link_vars.periodic_flags &=
					~PERIODIC_FLAGS_LINK_EVENT;
				bnx2x_release_phy_lock(bp);
				if (IS_MF(bp))
					bnx2x_link_sync_notify(bp);
				bnx2x_link_report(bp);
			}
			/* Always call it here: bnx2x_link_report() will
			 * prevent duplicate link indications.
			 */
			bnx2x__link_status_update(bp);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			bnx2x_mc_assert(bp);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

/*
 * Bits map:
 * 0-7  - Engine0 load counter.
 * 8-15 - Engine1 load counter.
 * 16   - Engine0 RESET_IN_PROGRESS bit.
 * 17   - Engine1 RESET_IN_PROGRESS bit.
 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
 *        function on the engine
 * 19   - Engine1 ONE_IS_LOADED.
 * 20   - Chip reset flow bit.
When set, a non-leader must wait for both engines'
3806 * leaders to complete (check for both RESET_IN_PROGRESS bits and not for
3807 * just the one belonging to its engine).
3808 *
3809 */
3810 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
3811
3812 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
3813 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
3814 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
3815 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
3816 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
3817 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
3818 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
3819
3820 /*
3821 * Set the GLOBAL_RESET bit.
3822 *
3823 * Should be run under rtnl lock
3824 */
3825 void bnx2x_set_reset_global(struct bnx2x *bp)
3826 {
3827 u32 val;
3828 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3829 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3830 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
3831 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3832 }
3833
3834 /*
3835 * Clear the GLOBAL_RESET bit.
3836 *
3837 * Should be run under rtnl lock
3838 */
3839 static void bnx2x_clear_reset_global(struct bnx2x *bp)
3840 {
3841 u32 val;
3842 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3843 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3844 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
3845 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3846 }
3847
3848 /*
3849 * Checks the GLOBAL_RESET bit.
3850 *
3851 * Should be run under rtnl lock
3852 */
3853 static bool bnx2x_reset_is_global(struct bnx2x *bp)
3854 {
3855 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3856
3857 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3858 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
3859 }
3860
3861 /*
3862 * Clear RESET_IN_PROGRESS bit for the current engine.
3863 *
3864 * Should be run under rtnl lock
3865 */
3866 static void bnx2x_set_reset_done(struct bnx2x *bp)
3867 {
3868 u32 val;
3869 u32 bit = BP_PATH(bp) ?
3870 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3871 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3872 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3873
3874 /* Clear the bit */
3875 val &= ~bit;
3876 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3877
3878 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3879 }
3880
3881 /*
3882 * Set RESET_IN_PROGRESS for the current engine.
3883 *
3884 * Should be run under rtnl lock
3885 */
3886 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3887 {
3888 u32 val;
3889 u32 bit = BP_PATH(bp) ?
3890 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3891 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3892 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3893
3894 /* Set the bit */
3895 val |= bit;
3896 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3897 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3898 }
3899
3900 /*
3901 * Checks the RESET_IN_PROGRESS bit for the given engine.
3902 * Should be run under rtnl lock
3903 */
3904 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
3905 {
3906 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3907 u32 bit = engine ?
3908 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3909
3910 /* return false if bit is set */
3911 return (val & bit) ? false : true;
3912 }
3913
3914 /*
3915 * Set the PF load bit for the current PF.
3916 *
3917 * Should be run under rtnl lock
3918 */
3919 void bnx2x_set_pf_load(struct bnx2x *bp)
3920 {
3921 u32 val1, val;
3922 u32 mask = BP_PATH(bp) ?
BNX2X_PATH1_LOAD_CNT_MASK : 3923 BNX2X_PATH0_LOAD_CNT_MASK; 3924 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3925 BNX2X_PATH0_LOAD_CNT_SHIFT; 3926 3927 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3928 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3929 3930 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 3931 3932 /* get the current counter value */ 3933 val1 = (val & mask) >> shift; 3934 3935 /* set bit of that PF */ 3936 val1 |= (1 << bp->pf_num); 3937 3938 /* clear the old value */ 3939 val &= ~mask; 3940 3941 /* set the new one */ 3942 val |= ((val1 << shift) & mask); 3943 3944 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3945 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3946 } 3947 3948 /** 3949 * bnx2x_clear_pf_load - clear pf load mark 3950 * 3951 * @bp: driver handle 3952 * 3953 * Should be run under rtnl lock. 3954 * Decrements the load counter for the current engine. Returns 3955 * whether other functions are still loaded 3956 */ 3957 bool bnx2x_clear_pf_load(struct bnx2x *bp) 3958 { 3959 u32 val1, val; 3960 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 3961 BNX2X_PATH0_LOAD_CNT_MASK; 3962 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3963 BNX2X_PATH0_LOAD_CNT_SHIFT; 3964 3965 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3966 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3967 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 3968 3969 /* get the current counter value */ 3970 val1 = (val & mask) >> shift; 3971 3972 /* clear bit of that PF */ 3973 val1 &= ~(1 << bp->pf_num); 3974 3975 /* clear the old value */ 3976 val &= ~mask; 3977 3978 /* set the new one */ 3979 val |= ((val1 << shift) & mask); 3980 3981 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3982 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3983 return val1 != 0; 3984 } 3985 3986 /* 3987 * Read the load status for the current engine. 3988 * 3989 * should be run under rtnl lock 3990 */ 3991 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 3992 { 3993 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 3994 BNX2X_PATH0_LOAD_CNT_MASK); 3995 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3996 BNX2X_PATH0_LOAD_CNT_SHIFT); 3997 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3998 3999 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4000 4001 val = (val & mask) >> shift; 4002 4003 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4004 engine, val); 4005 4006 return val != 0; 4007 } 4008 4009 /* 4010 * Reset the load status for the current engine. 4011 */ 4012 static void bnx2x_clear_load_status(struct bnx2x *bp) 4013 { 4014 u32 val; 4015 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4016 BNX2X_PATH0_LOAD_CNT_MASK); 4017 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4018 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4019 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); 4020 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4021 } 4022 4023 static void _print_next_block(int idx, const char *blk) 4024 { 4025 pr_cont("%s%s", idx ? 
", " : "", blk); 4026 } 4027 4028 static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, 4029 bool print) 4030 { 4031 int i = 0; 4032 u32 cur_bit = 0; 4033 for (i = 0; sig; i++) { 4034 cur_bit = ((u32)0x1 << i); 4035 if (sig & cur_bit) { 4036 switch (cur_bit) { 4037 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4038 if (print) 4039 _print_next_block(par_num++, "BRB"); 4040 break; 4041 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4042 if (print) 4043 _print_next_block(par_num++, "PARSER"); 4044 break; 4045 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4046 if (print) 4047 _print_next_block(par_num++, "TSDM"); 4048 break; 4049 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4050 if (print) 4051 _print_next_block(par_num++, 4052 "SEARCHER"); 4053 break; 4054 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4055 if (print) 4056 _print_next_block(par_num++, "TCM"); 4057 break; 4058 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4059 if (print) 4060 _print_next_block(par_num++, "TSEMI"); 4061 break; 4062 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4063 if (print) 4064 _print_next_block(par_num++, "XPB"); 4065 break; 4066 } 4067 4068 /* Clear the bit */ 4069 sig &= ~cur_bit; 4070 } 4071 } 4072 4073 return par_num; 4074 } 4075 4076 static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, 4077 bool *global, bool print) 4078 { 4079 int i = 0; 4080 u32 cur_bit = 0; 4081 for (i = 0; sig; i++) { 4082 cur_bit = ((u32)0x1 << i); 4083 if (sig & cur_bit) { 4084 switch (cur_bit) { 4085 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4086 if (print) 4087 _print_next_block(par_num++, "PBF"); 4088 break; 4089 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4090 if (print) 4091 _print_next_block(par_num++, "QM"); 4092 break; 4093 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4094 if (print) 4095 _print_next_block(par_num++, "TM"); 4096 break; 4097 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4098 if (print) 4099 _print_next_block(par_num++, "XSDM"); 4100 break; 4101 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4102 if (print) 4103 _print_next_block(par_num++, "XCM"); 4104 break; 4105 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4106 if (print) 4107 _print_next_block(par_num++, "XSEMI"); 4108 break; 4109 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4110 if (print) 4111 _print_next_block(par_num++, 4112 "DOORBELLQ"); 4113 break; 4114 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4115 if (print) 4116 _print_next_block(par_num++, "NIG"); 4117 break; 4118 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4119 if (print) 4120 _print_next_block(par_num++, 4121 "VAUX PCI CORE"); 4122 *global = true; 4123 break; 4124 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4125 if (print) 4126 _print_next_block(par_num++, "DEBUG"); 4127 break; 4128 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4129 if (print) 4130 _print_next_block(par_num++, "USDM"); 4131 break; 4132 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4133 if (print) 4134 _print_next_block(par_num++, "UCM"); 4135 break; 4136 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4137 if (print) 4138 _print_next_block(par_num++, "USEMI"); 4139 break; 4140 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4141 if (print) 4142 _print_next_block(par_num++, "UPB"); 4143 break; 4144 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4145 if (print) 4146 _print_next_block(par_num++, "CSDM"); 4147 break; 4148 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4149 if (print) 4150 _print_next_block(par_num++, "CCM"); 4151 break; 4152 } 4153 4154 /* Clear the bit */ 4155 sig &= ~cur_bit; 4156 
} 4157 } 4158 4159 return par_num; 4160 } 4161 4162 static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, 4163 bool print) 4164 { 4165 int i = 0; 4166 u32 cur_bit = 0; 4167 for (i = 0; sig; i++) { 4168 cur_bit = ((u32)0x1 << i); 4169 if (sig & cur_bit) { 4170 switch (cur_bit) { 4171 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4172 if (print) 4173 _print_next_block(par_num++, "CSEMI"); 4174 break; 4175 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4176 if (print) 4177 _print_next_block(par_num++, "PXP"); 4178 break; 4179 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4180 if (print) 4181 _print_next_block(par_num++, 4182 "PXPPCICLOCKCLIENT"); 4183 break; 4184 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4185 if (print) 4186 _print_next_block(par_num++, "CFC"); 4187 break; 4188 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4189 if (print) 4190 _print_next_block(par_num++, "CDU"); 4191 break; 4192 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4193 if (print) 4194 _print_next_block(par_num++, "DMAE"); 4195 break; 4196 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4197 if (print) 4198 _print_next_block(par_num++, "IGU"); 4199 break; 4200 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4201 if (print) 4202 _print_next_block(par_num++, "MISC"); 4203 break; 4204 } 4205 4206 /* Clear the bit */ 4207 sig &= ~cur_bit; 4208 } 4209 } 4210 4211 return par_num; 4212 } 4213 4214 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4215 bool *global, bool print) 4216 { 4217 int i = 0; 4218 u32 cur_bit = 0; 4219 for (i = 0; sig; i++) { 4220 cur_bit = ((u32)0x1 << i); 4221 if (sig & cur_bit) { 4222 switch (cur_bit) { 4223 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4224 if (print) 4225 _print_next_block(par_num++, "MCP ROM"); 4226 *global = true; 4227 break; 4228 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4229 if (print) 4230 _print_next_block(par_num++, 4231 "MCP UMP RX"); 4232 *global = true; 4233 break; 4234 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4235 if (print) 4236 _print_next_block(par_num++, 4237 "MCP UMP TX"); 4238 *global = true; 4239 break; 4240 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4241 if (print) 4242 _print_next_block(par_num++, 4243 "MCP SCPAD"); 4244 *global = true; 4245 break; 4246 } 4247 4248 /* Clear the bit */ 4249 sig &= ~cur_bit; 4250 } 4251 } 4252 4253 return par_num; 4254 } 4255 4256 static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, 4257 bool print) 4258 { 4259 int i = 0; 4260 u32 cur_bit = 0; 4261 for (i = 0; sig; i++) { 4262 cur_bit = ((u32)0x1 << i); 4263 if (sig & cur_bit) { 4264 switch (cur_bit) { 4265 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4266 if (print) 4267 _print_next_block(par_num++, "PGLUE_B"); 4268 break; 4269 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4270 if (print) 4271 _print_next_block(par_num++, "ATC"); 4272 break; 4273 } 4274 4275 /* Clear the bit */ 4276 sig &= ~cur_bit; 4277 } 4278 } 4279 4280 return par_num; 4281 } 4282 4283 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4284 u32 *sig) 4285 { 4286 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4287 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4288 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4289 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4290 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4291 int par_num = 0; 4292 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4293 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4294 sig[0] & HW_PRTY_ASSERT_SET_0, 4295 sig[1] & HW_PRTY_ASSERT_SET_1, 4296 sig[2] & 
HW_PRTY_ASSERT_SET_2, 4297 sig[3] & HW_PRTY_ASSERT_SET_3, 4298 sig[4] & HW_PRTY_ASSERT_SET_4); 4299 if (print) 4300 netdev_err(bp->dev, 4301 "Parity errors detected in blocks: "); 4302 par_num = bnx2x_check_blocks_with_parity0( 4303 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4304 par_num = bnx2x_check_blocks_with_parity1( 4305 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4306 par_num = bnx2x_check_blocks_with_parity2( 4307 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4308 par_num = bnx2x_check_blocks_with_parity3( 4309 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4310 par_num = bnx2x_check_blocks_with_parity4( 4311 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4312 4313 if (print) 4314 pr_cont("\n"); 4315 4316 return true; 4317 } else 4318 return false; 4319 } 4320 4321 /** 4322 * bnx2x_chk_parity_attn - checks for parity attentions. 4323 * 4324 * @bp: driver handle 4325 * @global: true if there was a global attention 4326 * @print: show parity attention in syslog 4327 */ 4328 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 4329 { 4330 struct attn_route attn = { {0} }; 4331 int port = BP_PORT(bp); 4332 4333 attn.sig[0] = REG_RD(bp, 4334 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 4335 port*4); 4336 attn.sig[1] = REG_RD(bp, 4337 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 4338 port*4); 4339 attn.sig[2] = REG_RD(bp, 4340 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 4341 port*4); 4342 attn.sig[3] = REG_RD(bp, 4343 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4344 port*4); 4345 4346 if (!CHIP_IS_E1x(bp)) 4347 attn.sig[4] = REG_RD(bp, 4348 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 4349 port*4); 4350 4351 return bnx2x_parity_attn(bp, global, print, attn.sig); 4352 } 4353 4354 4355 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4356 { 4357 u32 val; 4358 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4359 4360 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 4361 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 4362 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 4363 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 4364 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 4365 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 4366 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 4367 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 4368 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 4369 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 4370 if (val & 4371 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 4372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 4373 if (val & 4374 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 4375 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 4376 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 4377 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 4378 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 4379 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 4380 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 4381 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 4382 } 4383 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 4384 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 4385 BNX2X_ERR("ATC hw attention 0x%x\n", val); 4386 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 4387 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 4388 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 4389 
BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 4390 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 4391 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 4392 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 4393 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 4394 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 4395 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 4396 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 4397 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 4398 } 4399 4400 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4401 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 4402 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 4403 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4404 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 4405 } 4406 4407 } 4408 4409 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4410 { 4411 struct attn_route attn, *group_mask; 4412 int port = BP_PORT(bp); 4413 int index; 4414 u32 reg_addr; 4415 u32 val; 4416 u32 aeu_mask; 4417 bool global = false; 4418 4419 /* need to take HW lock because MCP or other port might also 4420 try to handle this event */ 4421 bnx2x_acquire_alr(bp); 4422 4423 if (bnx2x_chk_parity_attn(bp, &global, true)) { 4424 #ifndef BNX2X_STOP_ON_ERROR 4425 bp->recovery_state = BNX2X_RECOVERY_INIT; 4426 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4427 /* Disable HW interrupts */ 4428 bnx2x_int_disable(bp); 4429 /* In case of parity errors don't handle attentions so that 4430 * other function would "see" parity errors. 4431 */ 4432 #else 4433 bnx2x_panic(); 4434 #endif 4435 bnx2x_release_alr(bp); 4436 return; 4437 } 4438 4439 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 4440 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 4441 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 4442 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 4443 if (!CHIP_IS_E1x(bp)) 4444 attn.sig[4] = 4445 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 4446 else 4447 attn.sig[4] = 0; 4448 4449 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 4450 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 4451 4452 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4453 if (deasserted & (1 << index)) { 4454 group_mask = &bp->attn_group[index]; 4455 4456 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 4457 index, 4458 group_mask->sig[0], group_mask->sig[1], 4459 group_mask->sig[2], group_mask->sig[3], 4460 group_mask->sig[4]); 4461 4462 bnx2x_attn_int_deasserted4(bp, 4463 attn.sig[4] & group_mask->sig[4]); 4464 bnx2x_attn_int_deasserted3(bp, 4465 attn.sig[3] & group_mask->sig[3]); 4466 bnx2x_attn_int_deasserted1(bp, 4467 attn.sig[1] & group_mask->sig[1]); 4468 bnx2x_attn_int_deasserted2(bp, 4469 attn.sig[2] & group_mask->sig[2]); 4470 bnx2x_attn_int_deasserted0(bp, 4471 attn.sig[0] & group_mask->sig[0]); 4472 } 4473 } 4474 4475 bnx2x_release_alr(bp); 4476 4477 if (bp->common.int_block == INT_BLOCK_HC) 4478 reg_addr = (HC_REG_COMMAND_REG + port*32 + 4479 COMMAND_REG_ATTN_BITS_CLR); 4480 else 4481 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 4482 4483 val = ~deasserted; 4484 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 4485 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 4486 REG_WR(bp, reg_addr, val); 4487 4488 if (~bp->attn_state & deasserted) 4489 BNX2X_ERR("IGU ERROR\n"); 4490 4491 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4492 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4493 4494 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4495 aeu_mask = REG_RD(bp, reg_addr); 4496 4497 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 4498 aeu_mask, deasserted); 4499 aeu_mask |= (deasserted & 0x3ff); 4500 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 4501 4502 REG_WR(bp, reg_addr, aeu_mask); 4503 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4504 4505 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 4506 bp->attn_state &= ~deasserted; 4507 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 4508 } 4509 4510 static void bnx2x_attn_int(struct bnx2x *bp) 4511 { 4512 /* read local copy of bits */ 4513 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 4514 attn_bits); 4515 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 4516 attn_bits_ack); 4517 u32 attn_state = bp->attn_state; 4518 4519 /* look for changed bits */ 4520 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 4521 u32 deasserted = ~attn_bits & attn_ack & attn_state; 4522 4523 DP(NETIF_MSG_HW, 4524 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 4525 attn_bits, attn_ack, asserted, deasserted); 4526 4527 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 4528 BNX2X_ERR("BAD attention state\n"); 4529 4530 /* handle bits that were raised */ 4531 if (asserted) 4532 bnx2x_attn_int_asserted(bp, asserted); 4533 4534 if (deasserted) 4535 bnx2x_attn_int_deasserted(bp, deasserted); 4536 } 4537 4538 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 4539 u16 index, u8 op, u8 update) 4540 { 4541 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 4542 4543 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 4544 igu_addr); 4545 } 4546 4547 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 4548 { 4549 /* No memory barriers */ 4550 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 4551 mmiowb(); /* keep prod updates ordered */ 4552 } 4553 4554 #ifdef BCM_CNIC 4555 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4556 union event_ring_elem *elem) 4557 { 4558 u8 err = elem->message.error; 4559 4560 if (!bp->cnic_eth_dev.starting_cid || 4561 (cid < bp->cnic_eth_dev.starting_cid && 4562 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 4563 return 1; 4564 4565 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 4566 4567 if (unlikely(err)) { 4568 4569 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 4570 cid); 4571 bnx2x_panic_dump(bp); 4572 } 4573 bnx2x_cnic_cfc_comp(bp, cid, err); 4574 return 0; 4575 } 4576 #endif 4577 4578 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4579 { 4580 struct bnx2x_mcast_ramrod_params rparam; 4581 int rc; 4582 4583 memset(&rparam, 0, sizeof(rparam)); 4584 4585 rparam.mcast_obj = &bp->mcast_obj; 4586 4587 netif_addr_lock_bh(bp->dev); 4588 4589 /* Clear pending state for the last command */ 4590 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 4591 4592 /* If there are pending mcast commands - send them */ 4593 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 4594 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 4595 if (rc < 0) 4596 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 4597 rc); 4598 } 4599 4600 netif_addr_unlock_bh(bp->dev); 4601 } 4602 4603 static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 4604 union event_ring_elem *elem) 4605 { 4606 unsigned long ramrod_flags = 0; 4607 int rc = 0; 4608 u32 cid = 
elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4609 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 4610 4611 /* Always push next commands out, don't wait here */ 4612 __set_bit(RAMROD_CONT, &ramrod_flags); 4613 4614 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 4615 case BNX2X_FILTER_MAC_PENDING: 4616 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4617 #ifdef BCM_CNIC 4618 if (cid == BNX2X_ISCSI_ETH_CID) 4619 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4620 else 4621 #endif 4622 vlan_mac_obj = &bp->fp[cid].mac_obj; 4623 4624 break; 4625 case BNX2X_FILTER_MCAST_PENDING: 4626 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 4627 /* This is only relevant for 57710 where multicast MACs are 4628 * configured as unicast MACs using the same ramrod. 4629 */ 4630 bnx2x_handle_mcast_eqe(bp); 4631 return; 4632 default: 4633 BNX2X_ERR("Unsupported classification command: %d\n", 4634 elem->message.data.eth_event.echo); 4635 return; 4636 } 4637 4638 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 4639 4640 if (rc < 0) 4641 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 4642 else if (rc > 0) 4643 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 4644 4645 } 4646 4647 #ifdef BCM_CNIC 4648 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4649 #endif 4650 4651 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4652 { 4653 netif_addr_lock_bh(bp->dev); 4654 4655 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 4656 4657 /* Send rx_mode command again if was requested */ 4658 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 4659 bnx2x_set_storm_rx_mode(bp); 4660 #ifdef BCM_CNIC 4661 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 4662 &bp->sp_state)) 4663 bnx2x_set_iscsi_eth_rx_mode(bp, true); 4664 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 4665 &bp->sp_state)) 4666 bnx2x_set_iscsi_eth_rx_mode(bp, false); 4667 #endif 4668 4669 netif_addr_unlock_bh(bp->dev); 4670 } 4671 4672 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 4673 union event_ring_elem *elem) 4674 { 4675 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 4676 DP(BNX2X_MSG_SP, 4677 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 4678 elem->message.data.vif_list_event.func_bit_map); 4679 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 4680 elem->message.data.vif_list_event.func_bit_map); 4681 } else if (elem->message.data.vif_list_event.echo == 4682 VIF_LIST_RULE_SET) { 4683 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 4684 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 4685 } 4686 } 4687 4688 /* called with rtnl_lock */ 4689 static void bnx2x_after_function_update(struct bnx2x *bp) 4690 { 4691 int q, rc; 4692 struct bnx2x_fastpath *fp; 4693 struct bnx2x_queue_state_params queue_params = {NULL}; 4694 struct bnx2x_queue_update_params *q_update_params = 4695 &queue_params.params.update; 4696 4697 /* Send Q update command with afex vlan removal values for all Qs */ 4698 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 4699 4700 /* set silent vlan removal values according to vlan mode */ 4701 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 4702 &q_update_params->update_flags); 4703 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 4704 &q_update_params->update_flags); 4705 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 4706 4707 /* in access mode mark mask and value are 0 to strip all vlans */ 4708 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { 4709 
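/* Presumably the silent-removal match rule is (tag & mask) == value:
 * with mask and value both zero every VLAN tag matches and is
 * stripped, while the else branch below strips only the default
 * afex VLAN tag.
 */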
q_update_params->silent_removal_value = 0;
4710 q_update_params->silent_removal_mask = 0;
4711 } else {
4712 q_update_params->silent_removal_value =
4713 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4714 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4715 }
4716
4717 for_each_eth_queue(bp, q) {
4718 /* Set the appropriate Queue object */
4719 fp = &bp->fp[q];
4720 queue_params.q_obj = &fp->q_obj;
4721
4722 /* send the ramrod */
4723 rc = bnx2x_queue_state_change(bp, &queue_params);
4724 if (rc < 0)
4725 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4726 q);
4727 }
4728
4729 #ifdef BCM_CNIC
4730 if (!NO_FCOE(bp)) {
4731 fp = &bp->fp[FCOE_IDX];
4732 queue_params.q_obj = &fp->q_obj;
4733
4734 /* clear pending completion bit */
4735 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4736
4737 /* mark latest Q bit */
4738 smp_mb__before_clear_bit();
4739 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4740 smp_mb__after_clear_bit();
4741
4742 /* send Q update ramrod for FCoE Q */
4743 rc = bnx2x_queue_state_change(bp, &queue_params);
4744 if (rc < 0)
4745 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4746 FCOE_IDX);
4747 } else {
4748 /* If no FCoE ring - ACK MCP now */
4749 bnx2x_link_report(bp);
4750 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4751 }
4752 #else
4753 /* If no FCoE ring - ACK MCP now */
4754 bnx2x_link_report(bp);
4755 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4756 #endif /* BCM_CNIC */
4757 }
4758
4759 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4760 struct bnx2x *bp, u32 cid)
4761 {
4762 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4763 #ifdef BCM_CNIC
4764 if (cid == BNX2X_FCOE_ETH_CID)
4765 return &bnx2x_fcoe(bp, q_obj);
4766 else
4767 #endif
4768 return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
4769 }
4770
4771 static void bnx2x_eq_int(struct bnx2x *bp)
4772 {
4773 u16 hw_cons, sw_cons, sw_prod;
4774 union event_ring_elem *elem;
4775 u32 cid;
4776 u8 opcode;
4777 int spqe_cnt = 0;
4778 struct bnx2x_queue_sp_obj *q_obj;
4779 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
4780 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
4781
4782 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
4783
4784 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
4785 * When we reach the next-page element we need to adjust so the loop
4786 * condition below will be met. The next-page element is the size of a
4787 * regular element, hence we increment by 1.
4788 */
4789 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
4790 hw_cons++;
4791
4792 /* This function may never run in parallel with itself for a
4793 * specific bp, thus there is no need for a "paired" read memory
4794 * barrier here.
4795 */ 4796 sw_cons = bp->eq_cons; 4797 sw_prod = bp->eq_prod; 4798 4799 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", 4800 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); 4801 4802 for (; sw_cons != hw_cons; 4803 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 4804 4805 4806 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; 4807 4808 cid = SW_CID(elem->message.data.cfc_del_event.cid); 4809 opcode = elem->message.opcode; 4810 4811 4812 /* handle eq element */ 4813 switch (opcode) { 4814 case EVENT_RING_OPCODE_STAT_QUERY: 4815 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, 4816 "got statistics comp event %d\n", 4817 bp->stats_comp++); 4818 /* nothing to do with stats comp */ 4819 goto next_spqe; 4820 4821 case EVENT_RING_OPCODE_CFC_DEL: 4822 /* handle according to cid range */ 4823 /* 4824 * we may want to verify here that the bp state is 4825 * HALTING 4826 */ 4827 DP(BNX2X_MSG_SP, 4828 "got delete ramrod for MULTI[%d]\n", cid); 4829 #ifdef BCM_CNIC 4830 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 4831 goto next_spqe; 4832 #endif 4833 q_obj = bnx2x_cid_to_q_obj(bp, cid); 4834 4835 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 4836 break; 4837 4838 4839 4840 goto next_spqe; 4841 4842 case EVENT_RING_OPCODE_STOP_TRAFFIC: 4843 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 4844 if (f_obj->complete_cmd(bp, f_obj, 4845 BNX2X_F_CMD_TX_STOP)) 4846 break; 4847 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); 4848 goto next_spqe; 4849 4850 case EVENT_RING_OPCODE_START_TRAFFIC: 4851 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 4852 if (f_obj->complete_cmd(bp, f_obj, 4853 BNX2X_F_CMD_TX_START)) 4854 break; 4855 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4856 goto next_spqe; 4857 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4858 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 4859 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 4860 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE); 4861 4862 /* We will perform the Queues update from sp_rtnl task 4863 * as all Queue SP operations should run under 4864 * rtnl_lock. 4865 */ 4866 smp_mb__before_clear_bit(); 4867 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 4868 &bp->sp_rtnl_state); 4869 smp_mb__after_clear_bit(); 4870 4871 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4872 goto next_spqe; 4873 4874 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 4875 f_obj->complete_cmd(bp, f_obj, 4876 BNX2X_F_CMD_AFEX_VIFLISTS); 4877 bnx2x_after_afex_vif_lists(bp, elem); 4878 goto next_spqe; 4879 case EVENT_RING_OPCODE_FUNCTION_START: 4880 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4881 "got FUNC_START ramrod\n"); 4882 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 4883 break; 4884 4885 goto next_spqe; 4886 4887 case EVENT_RING_OPCODE_FUNCTION_STOP: 4888 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4889 "got FUNC_STOP ramrod\n"); 4890 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 4891 break; 4892 4893 goto next_spqe; 4894 } 4895 4896 switch (opcode | bp->state) { 4897 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 4898 BNX2X_STATE_OPEN): 4899 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 4900 BNX2X_STATE_OPENING_WAIT4_PORT): 4901 cid = elem->message.data.eth_event.echo & 4902 BNX2X_SWCID_MASK; 4903 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", 4904 cid); 4905 rss_raw->clear_pending(rss_raw); 4906 break; 4907 4908 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 4909 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 4910 case (EVENT_RING_OPCODE_SET_MAC | 4911 BNX2X_STATE_CLOSING_WAIT4_HALT): 4912 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4913 BNX2X_STATE_OPEN): 4914 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4915 BNX2X_STATE_DIAG): 4916 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4917 BNX2X_STATE_CLOSING_WAIT4_HALT): 4918 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); 4919 bnx2x_handle_classification_eqe(bp, elem); 4920 break; 4921 4922 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4923 BNX2X_STATE_OPEN): 4924 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4925 BNX2X_STATE_DIAG): 4926 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4927 BNX2X_STATE_CLOSING_WAIT4_HALT): 4928 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); 4929 bnx2x_handle_mcast_eqe(bp); 4930 break; 4931 4932 case (EVENT_RING_OPCODE_FILTERS_RULES | 4933 BNX2X_STATE_OPEN): 4934 case (EVENT_RING_OPCODE_FILTERS_RULES | 4935 BNX2X_STATE_DIAG): 4936 case (EVENT_RING_OPCODE_FILTERS_RULES | 4937 BNX2X_STATE_CLOSING_WAIT4_HALT): 4938 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); 4939 bnx2x_handle_rx_mode_eqe(bp); 4940 break; 4941 default: 4942 /* unknown event log error and continue */ 4943 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", 4944 elem->message.opcode, bp->state); 4945 } 4946 next_spqe: 4947 spqe_cnt++; 4948 } /* for */ 4949 4950 smp_mb__before_atomic_inc(); 4951 atomic_add(spqe_cnt, &bp->eq_spq_left); 4952 4953 bp->eq_cons = sw_cons; 4954 bp->eq_prod = sw_prod; 4955 /* Make sure that above mem writes were issued towards the memory */ 4956 smp_wmb(); 4957 4958 /* update producer */ 4959 bnx2x_update_eq_prod(bp, bp->eq_prod); 4960 } 4961 4962 static void bnx2x_sp_task(struct work_struct *work) 4963 { 4964 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 4965 u16 status; 4966 4967 status = bnx2x_update_dsb_idx(bp); 4968 /* if (status == 0) */ 4969 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ 4970 4971 DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status); 4972 4973 /* HW attentions */ 4974 if (status & BNX2X_DEF_SB_ATT_IDX) { 4975 bnx2x_attn_int(bp); 4976 status &= ~BNX2X_DEF_SB_ATT_IDX; 4977 } 4978 4979 /* SP events: STAT_QUERY and others */ 4980 if (status & BNX2X_DEF_SB_IDX) { 4981 #ifdef BCM_CNIC 4982 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 4983 4984 if ((!NO_FCOE(bp)) && 4985 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 4986 /* 4987 * Prevent local bottom-halves from running as 4988 * we are going to change the local NAPI list. 4989 */ 4990 local_bh_disable(); 4991 napi_schedule(&bnx2x_fcoe(bp, napi)); 4992 local_bh_enable(); 4993 } 4994 #endif 4995 /* Handle EQ completions */ 4996 bnx2x_eq_int(bp); 4997 4998 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 4999 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); 5000 5001 status &= ~BNX2X_DEF_SB_IDX; 5002 } 5003 5004 if (unlikely(status)) 5005 DP(BNX2X_MSG_SP, "got an unknown interrupt! 
(status 0x%x)\n", 5006 status); 5007 5008 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5009 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5010 5011 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5012 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5013 &bp->sp_state)) { 5014 bnx2x_link_report(bp); 5015 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5016 } 5017 } 5018 5019 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5020 { 5021 struct net_device *dev = dev_instance; 5022 struct bnx2x *bp = netdev_priv(dev); 5023 5024 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, 5025 IGU_INT_DISABLE, 0); 5026 5027 #ifdef BNX2X_STOP_ON_ERROR 5028 if (unlikely(bp->panic)) 5029 return IRQ_HANDLED; 5030 #endif 5031 5032 #ifdef BCM_CNIC 5033 { 5034 struct cnic_ops *c_ops; 5035 5036 rcu_read_lock(); 5037 c_ops = rcu_dereference(bp->cnic_ops); 5038 if (c_ops) 5039 c_ops->cnic_handler(bp->cnic_data, NULL); 5040 rcu_read_unlock(); 5041 } 5042 #endif 5043 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 5044 5045 return IRQ_HANDLED; 5046 } 5047 5048 /* end of slow path */ 5049 5050 5051 void bnx2x_drv_pulse(struct bnx2x *bp) 5052 { 5053 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5054 bp->fw_drv_pulse_wr_seq); 5055 } 5056 5057 5058 static void bnx2x_timer(unsigned long data) 5059 { 5060 struct bnx2x *bp = (struct bnx2x *) data; 5061 5062 if (!netif_running(bp->dev)) 5063 return; 5064 5065 if (!BP_NOMCP(bp)) { 5066 int mb_idx = BP_FW_MB_IDX(bp); 5067 u32 drv_pulse; 5068 u32 mcp_pulse; 5069 5070 ++bp->fw_drv_pulse_wr_seq; 5071 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5072 /* TBD - add SYSTEM_TIME */ 5073 drv_pulse = bp->fw_drv_pulse_wr_seq; 5074 bnx2x_drv_pulse(bp); 5075 5076 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5077 MCP_PULSE_SEQ_MASK); 5078 /* The delta between driver pulse and mcp response 5079 * should be 1 (before mcp response) or 0 (after mcp response) 5080 */ 5081 if ((drv_pulse != mcp_pulse) && 5082 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5083 /* someone lost a heartbeat... 
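 * (each timer tick the driver advances drv_pulse and the MCP echoes it
 * back in mcp_pulse, so a delta other than 0 or 1 means one side has
 * stalled)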
*/ 5084 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 5085 drv_pulse, mcp_pulse); 5086 } 5087 } 5088 5089 if (bp->state == BNX2X_STATE_OPEN) 5090 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 5091 5092 mod_timer(&bp->timer, jiffies + bp->current_interval); 5093 } 5094 5095 /* end of Statistics */ 5096 5097 /* nic init */ 5098 5099 /* 5100 * nic init service functions 5101 */ 5102 5103 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5104 { 5105 u32 i; 5106 if (!(len%4) && !(addr%4)) 5107 for (i = 0; i < len; i += 4) 5108 REG_WR(bp, addr + i, fill); 5109 else 5110 for (i = 0; i < len; i++) 5111 REG_WR8(bp, addr + i, fill); 5112 5113 } 5114 5115 /* helper: writes FP SP data to FW - data_size in dwords */ 5116 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5117 int fw_sb_id, 5118 u32 *sb_data_p, 5119 u32 data_size) 5120 { 5121 int index; 5122 for (index = 0; index < data_size; index++) 5123 REG_WR(bp, BAR_CSTRORM_INTMEM + 5124 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 5125 sizeof(u32)*index, 5126 *(sb_data_p + index)); 5127 } 5128 5129 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5130 { 5131 u32 *sb_data_p; 5132 u32 data_size = 0; 5133 struct hc_status_block_data_e2 sb_data_e2; 5134 struct hc_status_block_data_e1x sb_data_e1x; 5135 5136 /* disable the function first */ 5137 if (!CHIP_IS_E1x(bp)) { 5138 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5139 sb_data_e2.common.state = SB_DISABLED; 5140 sb_data_e2.common.p_func.vf_valid = false; 5141 sb_data_p = (u32 *)&sb_data_e2; 5142 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5143 } else { 5144 memset(&sb_data_e1x, 0, 5145 sizeof(struct hc_status_block_data_e1x)); 5146 sb_data_e1x.common.state = SB_DISABLED; 5147 sb_data_e1x.common.p_func.vf_valid = false; 5148 sb_data_p = (u32 *)&sb_data_e1x; 5149 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5150 } 5151 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 5152 5153 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5154 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, 5155 CSTORM_STATUS_BLOCK_SIZE); 5156 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5157 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, 5158 CSTORM_SYNC_BLOCK_SIZE); 5159 } 5160 5161 /* helper: writes SP SB data to FW */ 5162 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5163 struct hc_sp_status_block_data *sp_sb_data) 5164 { 5165 int func = BP_FUNC(bp); 5166 int i; 5167 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 5168 REG_WR(bp, BAR_CSTRORM_INTMEM + 5169 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 5170 i*sizeof(u32), 5171 *((u32 *)sp_sb_data + i)); 5172 } 5173 5174 static void bnx2x_zero_sp_sb(struct bnx2x *bp) 5175 { 5176 int func = BP_FUNC(bp); 5177 struct hc_sp_status_block_data sp_sb_data; 5178 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5179 5180 sp_sb_data.state = SB_DISABLED; 5181 sp_sb_data.p_func.vf_valid = false; 5182 5183 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 5184 5185 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5186 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, 5187 CSTORM_SP_STATUS_BLOCK_SIZE); 5188 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5189 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, 5190 CSTORM_SP_SYNC_BLOCK_SIZE); 5191 5192 } 5193 5194 5195 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 5196 int igu_sb_id, int igu_seg_id) 5197 { 5198 hc_sm->igu_sb_id = igu_sb_id; 5199 hc_sm->igu_seg_id = igu_seg_id; 5200 hc_sm->timer_value = 0xFF; 5201 hc_sm->time_to_expire = 0xFFFFFFFF; 5202 } 5203 5204 
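/*
 * Each hc_index_data entry carries an SM_ID field inside ->flags that
 * binds the index to one of the two per-SB state machines (SM_RX_ID or
 * SM_TX_ID) set up by bnx2x_setup_ndsb_state_machine() above. A sketch
 * of the bit manipulation the helper below performs for a tx index:
 *
 *	flags = (flags & ~HC_INDEX_DATA_SM_ID) |
 *		(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
 */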
5205 /* Assign state machine ids to the status block indices. */
5206 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5207 {
5208 /* zero out state machine indices */
5209 /* rx indices */
5210 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5211
5212 /* tx indices */
5213 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5214 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5215 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5216 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5217
5218 /* map indices */
5219 /* rx indices */
5220 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5221 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5222
5223 /* tx indices */
5224 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5225 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5226 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5227 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5228 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5229 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5230 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5231 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5232 }
5233
5234 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5235 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5236 {
5237 int igu_seg_id;
5238
5239 struct hc_status_block_data_e2 sb_data_e2;
5240 struct hc_status_block_data_e1x sb_data_e1x;
5241 struct hc_status_block_sm *hc_sm_p;
5242 int data_size;
5243 u32 *sb_data_p;
5244
5245 if (CHIP_INT_MODE_IS_BC(bp))
5246 igu_seg_id = HC_SEG_ACCESS_NORM;
5247 else
5248 igu_seg_id = IGU_SEG_ACCESS_NORM;
5249
5250 bnx2x_zero_fp_sb(bp, fw_sb_id);
5251
5252 if (!CHIP_IS_E1x(bp)) {
5253 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5254 sb_data_e2.common.state = SB_ENABLED;
5255 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5256 sb_data_e2.common.p_func.vf_id = vfid;
5257 sb_data_e2.common.p_func.vf_valid = vf_valid;
5258 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5259 sb_data_e2.common.same_igu_sb_1b = true;
5260 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5261 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5262 hc_sm_p = sb_data_e2.common.state_machine;
5263 sb_data_p = (u32 *)&sb_data_e2;
5264 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5265 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5266 } else {
5267 memset(&sb_data_e1x, 0,
5268 sizeof(struct hc_status_block_data_e1x));
5269 sb_data_e1x.common.state = SB_ENABLED;
5270 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5271 sb_data_e1x.common.p_func.vf_id = 0xff;
5272 sb_data_e1x.common.p_func.vf_valid = false;
5273 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5274 sb_data_e1x.common.same_igu_sb_1b = true;
5275 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5276 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5277 hc_sm_p = sb_data_e1x.common.state_machine;
5278 sb_data_p = (u32 *)&sb_data_e1x;
5279 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5280 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5281 }
5282
5283 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5284 igu_sb_id, igu_seg_id);
5285 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5286 igu_sb_id, igu_seg_id);
5287
5288 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5289
5290 /* write indices to HW */
5291 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5292 }
5293
5294 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5295 u16 tx_usec, u16
rx_usec)
5296 {
5297 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5298 false, rx_usec);
5299 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5300 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5301 tx_usec);
5302 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5303 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5304 tx_usec);
5305 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5306 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5307 tx_usec);
5308 }
5309
5310 static void bnx2x_init_def_sb(struct bnx2x *bp)
5311 {
5312 struct host_sp_status_block *def_sb = bp->def_status_blk;
5313 dma_addr_t mapping = bp->def_status_blk_mapping;
5314 int igu_sp_sb_index;
5315 int igu_seg_id;
5316 int port = BP_PORT(bp);
5317 int func = BP_FUNC(bp);
5318 int reg_offset, reg_offset_en5;
5319 u64 section;
5320 int index;
5321 struct hc_sp_status_block_data sp_sb_data;
5322 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5323
5324 if (CHIP_INT_MODE_IS_BC(bp)) {
5325 igu_sp_sb_index = DEF_SB_IGU_ID;
5326 igu_seg_id = HC_SEG_ACCESS_DEF;
5327 } else {
5328 igu_sp_sb_index = bp->igu_dsb_id;
5329 igu_seg_id = IGU_SEG_ACCESS_DEF;
5330 }
5331
5332 /* ATTN */
5333 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5334 atten_status_block);
5335 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
5336
5337 bp->attn_state = 0;
5338
5339 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5340 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5341 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5342 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
5343 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5344 int sindex;
5345 /* take care of sig[0]..sig[4] */
5346 for (sindex = 0; sindex < 4; sindex++)
5347 bp->attn_group[index].sig[sindex] =
5348 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5349
5350 if (!CHIP_IS_E1x(bp))
5351 /*
5352 * enable5 is separate from the rest of the registers,
5353 * and therefore the address skip is 4
5354 * and not 16 between the different groups
5355 */
5356 bp->attn_group[index].sig[4] = REG_RD(bp,
5357 reg_offset_en5 + 0x4*index);
5358 else
5359 bp->attn_group[index].sig[4] = 0;
5360 }
5361
5362 if (bp->common.int_block == INT_BLOCK_HC) {
5363 reg_offset = (port ?
HC_REG_ATTN_MSG1_ADDR_L :
5364 HC_REG_ATTN_MSG0_ADDR_L);
5365
5366 REG_WR(bp, reg_offset, U64_LO(section));
5367 REG_WR(bp, reg_offset + 4, U64_HI(section));
5368 } else if (!CHIP_IS_E1x(bp)) {
5369 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5370 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5371 }
5372
5373 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5374 sp_sb);
5375
5376 bnx2x_zero_sp_sb(bp);
5377
5378 sp_sb_data.state = SB_ENABLED;
5379 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5380 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5381 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5382 sp_sb_data.igu_seg_id = igu_seg_id;
5383 sp_sb_data.p_func.pf_id = func;
5384 sp_sb_data.p_func.vnic_id = BP_VN(bp);
5385 sp_sb_data.p_func.vf_id = 0xff;
5386
5387 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5388
5389 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5390 }
5391
5392 void bnx2x_update_coalesce(struct bnx2x *bp)
5393 {
5394 int i;
5395
5396 for_each_eth_queue(bp, i)
5397 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5398 bp->tx_ticks, bp->rx_ticks);
5399 }
5400
5401 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5402 {
5403 spin_lock_init(&bp->spq_lock);
5404 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5405
5406 bp->spq_prod_idx = 0;
5407 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5408 bp->spq_prod_bd = bp->spq;
5409 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5410 }
5411
5412 static void bnx2x_init_eq_ring(struct bnx2x *bp)
5413 {
5414 int i;
5415 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5416 union event_ring_elem *elem =
5417 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5418
5419 elem->next_page.addr.hi =
5420 cpu_to_le32(U64_HI(bp->eq_mapping +
5421 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5422 elem->next_page.addr.lo =
5423 cpu_to_le32(U64_LO(bp->eq_mapping +
5424 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5425 }
5426 bp->eq_cons = 0;
5427 bp->eq_prod = NUM_EQ_DESC;
5428 bp->eq_cons_sb = BNX2X_EQ_INDEX;
5429 /* we want a warning message before it gets rough...
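 * (hence the "- 1" in the atomic_set() below: the eq_spq_left credit
 * runs out one element early, so the complaint fires before the ring
 * itself can overflow)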
*/
5430 atomic_set(&bp->eq_spq_left,
5431 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5432 }
5433
5434
5435 /* called with netif_addr_lock_bh() */
5436 void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5437 unsigned long rx_mode_flags,
5438 unsigned long rx_accept_flags,
5439 unsigned long tx_accept_flags,
5440 unsigned long ramrod_flags)
5441 {
5442 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5443 int rc;
5444
5445 memset(&ramrod_param, 0, sizeof(ramrod_param));
5446
5447 /* Prepare ramrod parameters */
5448 ramrod_param.cid = 0;
5449 ramrod_param.cl_id = cl_id;
5450 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5451 ramrod_param.func_id = BP_FUNC(bp);
5452
5453 ramrod_param.pstate = &bp->sp_state;
5454 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5455
5456 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5457 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5458
5459 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5460
5461 ramrod_param.ramrod_flags = ramrod_flags;
5462 ramrod_param.rx_mode_flags = rx_mode_flags;
5463
5464 ramrod_param.rx_accept_flags = rx_accept_flags;
5465 ramrod_param.tx_accept_flags = tx_accept_flags;
5466
5467 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5468 if (rc < 0) {
5469 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5470 return;
5471 }
5472 }
5473
5474 /* called with netif_addr_lock_bh() */
5475 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5476 {
5477 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5478 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5479
5480 #ifdef BCM_CNIC
5481 if (!NO_FCOE(bp))
5482
5483 /* Configure rx_mode of FCoE Queue */
5484 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5485 #endif
5486
5487 switch (bp->rx_mode) {
5488 case BNX2X_RX_MODE_NONE:
5489 /*
5490 * 'drop all' supersedes any accept flags that may have been
5491 * passed to the function.
5492 */
5493 break;
5494 case BNX2X_RX_MODE_NORMAL:
5495 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5496 __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
5497 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5498
5499 /* internal switching mode */
5500 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5501 __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
5502 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5503
5504 break;
5505 case BNX2X_RX_MODE_ALLMULTI:
5506 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5507 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5508 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5509
5510 /* internal switching mode */
5511 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5512 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5513 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5514
5515 break;
5516 case BNX2X_RX_MODE_PROMISC:
5517 /* According to the definition of SI mode, an interface in
5518 * promisc mode should receive matched and unmatched (in
5519 * resolution of port) unicast packets.
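 * (BNX2X_ACCEPT_UNMATCHED, set just below, is what adds the
 * unmatched ones)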
5520 */
5521 __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
5522 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5523 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5524 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5525
5526 /* internal switching mode */
5527 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5528 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5529
5530 if (IS_MF_SI(bp))
5531 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
5532 else
5533 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5534
5535 break;
5536 default:
5537 BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
5538 return;
5539 }
5540
5541 if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5542 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
5543 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
5544 }
5545
5546 __set_bit(RAMROD_RX, &ramrod_flags);
5547 __set_bit(RAMROD_TX, &ramrod_flags);
5548
5549 bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
5550 tx_accept_flags, ramrod_flags);
5551 }
5552
5553 static void bnx2x_init_internal_common(struct bnx2x *bp)
5554 {
5555 int i;
5556
5557 if (IS_MF_SI(bp))
5558 /*
5559 * In switch independent mode, the TSTORM needs to accept
5560 * packets that failed classification, since approximate match
5561 * mac addresses aren't written to NIG LLH
5562 */
5563 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5564 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
5565 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5566 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5567 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
5568
5569 /* Zero this manually as its initialization is
5570 currently missing in the initTool */
5571 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5572 REG_WR(bp, BAR_USTRORM_INTMEM +
5573 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5574 if (!CHIP_IS_E1x(bp)) {
5575 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5576 CHIP_INT_MODE_IS_BC(bp) ?
5577 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5578 }
5579 }
5580
5581 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5582 {
5583 switch (load_code) {
5584 case FW_MSG_CODE_DRV_LOAD_COMMON:
5585 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5586 bnx2x_init_internal_common(bp);
5587 /* no break */
5588
5589 case FW_MSG_CODE_DRV_LOAD_PORT:
5590 /* nothing to do */
5591 /* no break */
5592
5593 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5594 /* internal memory per function is
5595 initialized inside bnx2x_pf_init */
5596 break;
5597
5598 default:
5599 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5600 break;
5601 }
5602 }
5603
5604 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5605 {
5606 return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
5607 }
5608
5609 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5610 {
5611 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
5612 }
5613
5614 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5615 {
5616 if (CHIP_IS_E1x(fp->bp))
5617 return BP_L_ID(fp->bp) + fp->index;
5618 else /* We want Client ID to be the same as IGU SB ID for 57712 */
5619 return bnx2x_fp_igu_sb_id(fp);
5620 }
5621
5622 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5623 {
5624 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
5625 u8 cos;
5626 unsigned long q_type = 0;
5627 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
5628 fp->rx_queue = fp_idx;
5629 fp->cid = fp_idx;
5630 fp->cl_id = bnx2x_fp_cl_id(fp);
5631 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5632 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
5633 /* qZone id equals the FW (per path) client id */
5634 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
5635
5636 /* init shortcut */
5637 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5638
5639 /* Set up SB indices */
5640 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5641
5642 /* Configure Queue State object */
5643 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5644 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
5645
5646 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
5647
5648 /* init tx data */
5649 for_each_cos_in_tx_queue(fp, cos) {
5650 bnx2x_init_txdata(bp, &fp->txdata[cos],
5651 CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
5652 FP_COS_TO_TXQ(fp, cos),
5653 BNX2X_TX_SB_INDEX_BASE + cos);
5654 cids[cos] = fp->txdata[cos].cid;
5655 }
5656
5657 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
5658 BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5659 bnx2x_sp_mapping(bp, q_rdata), q_type);
5660
5661 /*
5662 * Configure classification DBs: Always enable Tx switching
5663 */
5664 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
5665
5666 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
5667 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
5668 fp->igu_sb_id);
5669 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
5670 fp->fw_sb_id, fp->igu_sb_id);
5671
5672 bnx2x_update_fpsb_idx(fp);
5673 }
5674
5675 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5676 {
5677 int i;
5678
5679 for (i = 1; i <= NUM_TX_RINGS; i++) {
5680 struct eth_tx_next_bd *tx_next_bd =
5681 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5682
5683 tx_next_bd->addr_hi =
5684 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5685 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5686 tx_next_bd->addr_lo =
5687 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5688 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5689 }
5690
5691 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5692 txdata->tx_db.data.zero_fill1
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}

void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_eth_fp(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

#endif

	/* Initialize MOD_ABS interrupts */
	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
			       bp->common.shmem_base, bp->common.shmem2_base,
			       BP_PORT(bp));
	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	if (bp->strm) {
		vfree(bp->strm->workspace);
		kfree(bp->strm);
		bp->strm = NULL;
	}

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;
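
	/* The gzip wrapper (the 10-byte header plus the optional FNAME
	 * field) was consumed manually above; passing a negative
	 * windowBits (-MAX_WBITS) to zlib_inflateInit2() therefore treats
	 * the payload as a raw DEFLATE stream, and Z_FINISH below asks for
	 * decompression in a single pass into the FW_BUF_SIZE buffer.
	 */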
5835 rc = zlib_inflate(bp->strm, Z_FINISH); 5836 if ((rc != Z_OK) && (rc != Z_STREAM_END)) 5837 netdev_err(bp->dev, "Firmware decompression error: %s\n", 5838 bp->strm->msg); 5839 5840 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 5841 if (bp->gunzip_outlen & 0x3) 5842 netdev_err(bp->dev, 5843 "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 5844 bp->gunzip_outlen); 5845 bp->gunzip_outlen >>= 2; 5846 5847 zlib_inflateEnd(bp->strm); 5848 5849 if (rc == Z_STREAM_END) 5850 return 0; 5851 5852 return rc; 5853 } 5854 5855 /* nic load/unload */ 5856 5857 /* 5858 * General service functions 5859 */ 5860 5861 /* send a NIG loopback debug packet */ 5862 static void bnx2x_lb_pckt(struct bnx2x *bp) 5863 { 5864 u32 wb_write[3]; 5865 5866 /* Ethernet source and destination addresses */ 5867 wb_write[0] = 0x55555555; 5868 wb_write[1] = 0x55555555; 5869 wb_write[2] = 0x20; /* SOP */ 5870 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 5871 5872 /* NON-IP protocol */ 5873 wb_write[0] = 0x09000000; 5874 wb_write[1] = 0x55555555; 5875 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 5876 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 5877 } 5878 5879 /* some of the internal memories 5880 * are not directly readable from the driver 5881 * to test them we send debug packets 5882 */ 5883 static int bnx2x_int_mem_test(struct bnx2x *bp) 5884 { 5885 int factor; 5886 int count, i; 5887 u32 val = 0; 5888 5889 if (CHIP_REV_IS_FPGA(bp)) 5890 factor = 120; 5891 else if (CHIP_REV_IS_EMUL(bp)) 5892 factor = 200; 5893 else 5894 factor = 1; 5895 5896 /* Disable inputs of parser neighbor blocks */ 5897 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 5898 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 5899 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 5900 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 5901 5902 /* Write 0 to parser credits for CFC search request */ 5903 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 5904 5905 /* send Ethernet packet */ 5906 bnx2x_lb_pckt(bp); 5907 5908 /* TODO do i reset NIG statistic? 
*/ 5909 /* Wait until NIG register shows 1 packet of size 0x10 */ 5910 count = 1000 * factor; 5911 while (count) { 5912 5913 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5914 val = *bnx2x_sp(bp, wb_data[0]); 5915 if (val == 0x10) 5916 break; 5917 5918 msleep(10); 5919 count--; 5920 } 5921 if (val != 0x10) { 5922 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 5923 return -1; 5924 } 5925 5926 /* Wait until PRS register shows 1 packet */ 5927 count = 1000 * factor; 5928 while (count) { 5929 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 5930 if (val == 1) 5931 break; 5932 5933 msleep(10); 5934 count--; 5935 } 5936 if (val != 0x1) { 5937 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 5938 return -2; 5939 } 5940 5941 /* Reset and init BRB, PRS */ 5942 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 5943 msleep(50); 5944 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 5945 msleep(50); 5946 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 5947 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 5948 5949 DP(NETIF_MSG_HW, "part2\n"); 5950 5951 /* Disable inputs of parser neighbor blocks */ 5952 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 5953 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 5954 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 5955 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 5956 5957 /* Write 0 to parser credits for CFC search request */ 5958 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 5959 5960 /* send 10 Ethernet packets */ 5961 for (i = 0; i < 10; i++) 5962 bnx2x_lb_pckt(bp); 5963 5964 /* Wait until NIG register shows 10 + 1 5965 packets of size 11*0x10 = 0xb0 */ 5966 count = 1000 * factor; 5967 while (count) { 5968 5969 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5970 val = *bnx2x_sp(bp, wb_data[0]); 5971 if (val == 0xb0) 5972 break; 5973 5974 msleep(10); 5975 count--; 5976 } 5977 if (val != 0xb0) { 5978 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 5979 return -3; 5980 } 5981 5982 /* Wait until PRS register shows 2 packets */ 5983 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 5984 if (val != 2) 5985 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 5986 5987 /* Write 1 to parser credits for CFC search request */ 5988 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 5989 5990 /* Wait until PRS register shows 3 packets */ 5991 msleep(10 * factor); 5992 /* Wait until NIG register shows 1 packet of size 0x10 */ 5993 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 5994 if (val != 3) 5995 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 5996 5997 /* clear NIG EOP FIFO */ 5998 for (i = 0; i < 11; i++) 5999 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); 6000 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); 6001 if (val != 1) { 6002 BNX2X_ERR("clear of NIG failed\n"); 6003 return -4; 6004 } 6005 6006 /* Reset and init BRB, PRS, NIG */ 6007 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6008 msleep(50); 6009 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6010 msleep(50); 6011 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6012 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6013 #ifndef BCM_CNIC 6014 /* set NIC mode */ 6015 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6016 #endif 6017 6018 /* Enable inputs of parser neighbor blocks */ 6019 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6020 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 6021 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 6022 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); 6023 6024 DP(NETIF_MSG_HW, "done\n"); 6025 6026 return 0; /* OK */ 6027 } 6028 6029 static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 6030 { 6031 REG_WR(bp, 
PXP_REG_PXP_INT_MASK_0, 0); 6032 if (!CHIP_IS_E1x(bp)) 6033 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6034 else 6035 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 6036 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6037 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6038 /* 6039 * mask read length error interrupts in brb for parser 6040 * (parsing unit and 'checksum and crc' unit) 6041 * these errors are legal (PU reads fixed length and CAC can cause 6042 * read length error on truncated packets) 6043 */ 6044 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); 6045 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 6046 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 6047 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 6048 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); 6049 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); 6050 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ 6051 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ 6052 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); 6053 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); 6054 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); 6055 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ 6056 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ 6057 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 6058 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); 6059 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); 6060 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 6061 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6062 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6063 6064 if (CHIP_REV_IS_FPGA(bp)) 6065 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 6066 else if (!CHIP_IS_E1x(bp)) 6067 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 6068 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF 6069 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT 6070 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN 6071 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED 6072 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED)); 6073 else 6074 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); 6075 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6076 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6077 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6078 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ 6079 6080 if (!CHIP_IS_E1x(bp)) 6081 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 6082 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 6083 6084 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 6085 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 6086 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 6087 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 6088 } 6089 6090 static void bnx2x_reset_common(struct bnx2x *bp) 6091 { 6092 u32 val = 0x1400; 6093 6094 /* reset_common */ 6095 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6096 0xd3ffff7f); 6097 6098 if (CHIP_IS_E3(bp)) { 6099 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6100 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6101 } 6102 6103 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); 6104 } 6105 6106 static void bnx2x_setup_dmae(struct bnx2x *bp) 6107 { 6108 bp->dmae_ready = 0; 6109 spin_lock_init(&bp->dmae_lock); 6110 } 6111 6112 static void bnx2x_init_pxp(struct bnx2x *bp) 6113 { 6114 u16 devctl; 6115 int r_order, w_order; 6116 6117 pci_read_config_word(bp->pdev, 6118 pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); 6119 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6120 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6121 if (bp->mrrs == -1) 6122 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); 6123 else { 6124 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); 6125 r_order = bp->mrrs; 6126 } 6127 6128 bnx2x_init_pxp_arb(bp, r_order, w_order); 6129 } 6130 6131 static void 
bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6132 { 6133 int is_required; 6134 u32 val; 6135 int port; 6136 6137 if (BP_NOMCP(bp)) 6138 return; 6139 6140 is_required = 0; 6141 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6142 SHARED_HW_CFG_FAN_FAILURE_MASK; 6143 6144 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) 6145 is_required = 1; 6146 6147 /* 6148 * The fan failure mechanism is usually related to the PHY type since 6149 * the power consumption of the board is affected by the PHY. Currently, 6150 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 6151 */ 6152 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 6153 for (port = PORT_0; port < PORT_MAX; port++) { 6154 is_required |= 6155 bnx2x_fan_failure_det_req( 6156 bp, 6157 bp->common.shmem_base, 6158 bp->common.shmem2_base, 6159 port); 6160 } 6161 6162 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 6163 6164 if (is_required == 0) 6165 return; 6166 6167 /* Fan failure is indicated by SPIO 5 */ 6168 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, 6169 MISC_REGISTERS_SPIO_INPUT_HI_Z); 6170 6171 /* set to active low mode */ 6172 val = REG_RD(bp, MISC_REG_SPIO_INT); 6173 val |= ((1 << MISC_REGISTERS_SPIO_5) << 6174 MISC_REGISTERS_SPIO_INT_OLD_SET_POS); 6175 REG_WR(bp, MISC_REG_SPIO_INT, val); 6176 6177 /* enable interrupt to signal the IGU */ 6178 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6179 val |= (1 << MISC_REGISTERS_SPIO_5); 6180 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 6181 } 6182 6183 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) 6184 { 6185 u32 offset = 0; 6186 6187 if (CHIP_IS_E1(bp)) 6188 return; 6189 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX)) 6190 return; 6191 6192 switch (BP_ABS_FUNC(bp)) { 6193 case 0: 6194 offset = PXP2_REG_PGL_PRETEND_FUNC_F0; 6195 break; 6196 case 1: 6197 offset = PXP2_REG_PGL_PRETEND_FUNC_F1; 6198 break; 6199 case 2: 6200 offset = PXP2_REG_PGL_PRETEND_FUNC_F2; 6201 break; 6202 case 3: 6203 offset = PXP2_REG_PGL_PRETEND_FUNC_F3; 6204 break; 6205 case 4: 6206 offset = PXP2_REG_PGL_PRETEND_FUNC_F4; 6207 break; 6208 case 5: 6209 offset = PXP2_REG_PGL_PRETEND_FUNC_F5; 6210 break; 6211 case 6: 6212 offset = PXP2_REG_PGL_PRETEND_FUNC_F6; 6213 break; 6214 case 7: 6215 offset = PXP2_REG_PGL_PRETEND_FUNC_F7; 6216 break; 6217 default: 6218 return; 6219 } 6220 6221 REG_WR(bp, offset, pretend_func_num); 6222 REG_RD(bp, offset); 6223 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); 6224 } 6225 6226 void bnx2x_pf_disable(struct bnx2x *bp) 6227 { 6228 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 6229 val &= ~IGU_PF_CONF_FUNC_EN; 6230 6231 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 6232 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6233 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6234 } 6235 6236 static void bnx2x__common_init_phy(struct bnx2x *bp) 6237 { 6238 u32 shmem_base[2], shmem2_base[2]; 6239 shmem_base[0] = bp->common.shmem_base; 6240 shmem2_base[0] = bp->common.shmem2_base; 6241 if (!CHIP_IS_E1x(bp)) { 6242 shmem_base[1] = 6243 SHMEM2_RD(bp, other_shmem_base_addr); 6244 shmem2_base[1] = 6245 SHMEM2_RD(bp, other_shmem2_base_addr); 6246 } 6247 bnx2x_acquire_phy_lock(bp); 6248 bnx2x_common_init_phy(bp, shmem_base, shmem2_base, 6249 bp->common.chip_id); 6250 bnx2x_release_phy_lock(bp); 6251 } 6252 6253 /** 6254 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));

	/*
	 * take the UNDI lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/*
		 * In 4-port or 2-port mode we need to turn off master-enable
		 * for everyone; after that, turn it back on for self. So we
		 * disregard multi-function or not, and always disable for
		 * all functions on the given path, this means 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1
		 */
		for (abs_func_id = BP_PATH(bp);
		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (!CHIP_IS_E1x(bp)) {
		/* In E2 there is a bug in the timers block that can cause
		 * function 6 / 7 (i.e. vnic3) to start even if it is marked
		 * as "scan-off".
		 * This occurs when a different function (func2,3) is being
		 * marked as "scan-off".
		 * Real-life scenario, for example: if a driver is being
		 * load-unloaded while func6,7 are down. This will cause the
		 * timer to access the ilt, translate to a logical address
		 * and send a request to read/write. Since the ilt for the
		 * function that is down is not valid, this will cause a
		 * translation error which is unrecoverable.
		 * The workaround is intended to make sure that when this
		 * happens nothing fatal will occur. The workaround:
		 *	1. First PF driver which loads on a path will:
		 *		a. After taking the chip out of reset, by
		 *		   using pretend, it will write "0" to the
		 *		   following registers of the other vnics:
		 *		   REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
		 *		   REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0);
		 *		   REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0);
		 *		   And for itself it will write '1' to
		 *		   PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to
		 *		   enable dmae-operations (writing to pram for
		 *		   example). Note: can be done for only
		 *		   function 6,7 but cleaner this way.
		 *		b. Write zero+valid to the entire ILT.
		 *		c. Init the first_timers_ilt_entry,
		 *		   last_timers_ilt_entry of VNIC3 (of that
		 *		   port). The range allocated will be the
		 *		   entire ILT. This is needed to prevent ILT
		 *		   range error.
		 *	2. Any PF driver load flow:
		 *		a. ILT update with the physical addresses of
		 *		   the allocated logical pages.
		 *		b. Wait 20msec. - note that this timeout is
		 *		   needed to make sure there are no requests
		 *		   in one of the PXP internal queues with
		 *		   "old" ILT addresses.
		 *		c. PF enable in the PGLC.
		 *		d. Clear the was_error of the PF in the PGLC.
		 *		   (could have occurred while driver was down)
		 *		e. PF enable in the CFC (WEAK + STRONG)
		 *		f. Timers scan enable
		 *	3. PF driver unload flow:
		 *		a. Clear the Timers scan_en.
		 *		b. Polling for scan_on=0 for that PF.
		 *		c. Clear the PF enable bit in the PXP.
		 *		d. Clear the PF enable in the CFC
		 *		   (WEAK + STRONG)
		 *		e. Write zero+valid to all ILT entries (The
		 *		   valid bit must stay set)
		 *		f. If this is VNIC 3 of a port then also init
		 *		   first_timers_ilt_entry to zero and
		 *		   last_timers_ilt_entry to the last entry in
		 *		   the ILT.
		 *
		 * Notes:
		 * Currently the PF error in the PGLC is non-recoverable.
		 * In the future there will be a recovery routine for this
		 * error. Currently attention is masked.
		 * Having an MCP lock on the load/unload process does not
		 * guarantee that there is no Timer disable during Func6/7
		 * enable. This is because the Timers scan is currently being
		 * cleared by the MCP on FLR.
		 * Step 2.d can be done only for PF6/7 and the driver can
		 * also check if there is error before clearing it. But the
		 * flow above is simpler and more general.
		 * All ILT entries are written by zero+valid and not just
		 * PF6/7 ILT entries since in the future the ILT entries
		 * allocation for PF-s might be dynamic.
		 */
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic	(this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its sibling are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);

	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
#endif

	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);

	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);

	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp,
PRS_REG_TAG_ETHERTYPE_0, 0x8926); 6532 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); 6533 } else { 6534 /* Bit-map indicating which L2 hdrs may appear 6535 * after the basic Ethernet header 6536 */ 6537 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 6538 bp->path_has_ovlan ? 7 : 6); 6539 } 6540 } 6541 6542 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 6543 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 6544 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); 6545 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); 6546 6547 if (!CHIP_IS_E1x(bp)) { 6548 /* reset VFC memories */ 6549 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 6550 VFC_MEMORIES_RST_REG_CAM_RST | 6551 VFC_MEMORIES_RST_REG_RAM_RST); 6552 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 6553 VFC_MEMORIES_RST_REG_CAM_RST | 6554 VFC_MEMORIES_RST_REG_RAM_RST); 6555 6556 msleep(20); 6557 } 6558 6559 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); 6560 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); 6561 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); 6562 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); 6563 6564 /* sync semi rtc */ 6565 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6566 0x80000000); 6567 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 6568 0x80000000); 6569 6570 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); 6571 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 6572 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 6573 6574 if (!CHIP_IS_E1x(bp)) { 6575 if (IS_MF_AFEX(bp)) { 6576 /* configure that VNTag and VLAN headers must be 6577 * sent in afex mode 6578 */ 6579 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); 6580 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); 6581 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 6582 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 6583 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); 6584 } else { 6585 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 6586 bp->path_has_ovlan ? 
7 : 6); 6587 } 6588 } 6589 6590 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6591 6592 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 6593 6594 #ifdef BCM_CNIC 6595 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6596 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 6597 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 6598 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 6599 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 6600 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 6601 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 6602 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 6603 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 6604 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 6605 #endif 6606 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6607 6608 if (sizeof(union cdu_context) != 1024) 6609 /* we currently assume that a context is 1024 bytes */ 6610 dev_alert(&bp->pdev->dev, 6611 "please adjust the size of cdu_context(%ld)\n", 6612 (long)sizeof(union cdu_context)); 6613 6614 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); 6615 val = (4 << 24) + (0 << 12) + 1024; 6616 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 6617 6618 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); 6619 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 6620 /* enable context validation interrupt from CFC */ 6621 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6622 6623 /* set the thresholds to prevent CFC/CDU race */ 6624 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 6625 6626 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); 6627 6628 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) 6629 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); 6630 6631 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); 6632 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); 6633 6634 /* Reset PCIE errors for debug */ 6635 REG_WR(bp, 0x2814, 0xffffffff); 6636 REG_WR(bp, 0x3820, 0xffffffff); 6637 6638 if (!CHIP_IS_E1x(bp)) { 6639 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 6640 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 6641 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 6642 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 6643 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 6644 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 6645 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 6646 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 6647 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 6648 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 6649 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 6650 } 6651 6652 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); 6653 if (!CHIP_IS_E1(bp)) { 6654 /* in E3 this done in per-port section */ 6655 if (!CHIP_IS_E3(bp)) 6656 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 6657 } 6658 if (CHIP_IS_E1H(bp)) 6659 /* not applicable for E2 (and above ...) 
*/ 6660 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); 6661 6662 if (CHIP_REV_IS_SLOW(bp)) 6663 msleep(200); 6664 6665 /* finish CFC init */ 6666 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); 6667 if (val != 1) { 6668 BNX2X_ERR("CFC LL_INIT failed\n"); 6669 return -EBUSY; 6670 } 6671 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); 6672 if (val != 1) { 6673 BNX2X_ERR("CFC AC_INIT failed\n"); 6674 return -EBUSY; 6675 } 6676 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 6677 if (val != 1) { 6678 BNX2X_ERR("CFC CAM_INIT failed\n"); 6679 return -EBUSY; 6680 } 6681 REG_WR(bp, CFC_REG_DEBUG0, 0); 6682 6683 if (CHIP_IS_E1(bp)) { 6684 /* read NIG statistic 6685 to see if this is our first up since powerup */ 6686 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6687 val = *bnx2x_sp(bp, wb_data[0]); 6688 6689 /* do internal memory self test */ 6690 if ((val == 0) && bnx2x_int_mem_test(bp)) { 6691 BNX2X_ERR("internal mem self test failed\n"); 6692 return -EBUSY; 6693 } 6694 } 6695 6696 bnx2x_setup_fan_failure_detection(bp); 6697 6698 /* clear PXP2 attentions */ 6699 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 6700 6701 bnx2x_enable_blocks_attention(bp); 6702 bnx2x_enable_blocks_parity(bp); 6703 6704 if (!BP_NOMCP(bp)) { 6705 if (CHIP_IS_E1x(bp)) 6706 bnx2x__common_init_phy(bp); 6707 } else 6708 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 6709 6710 return 0; 6711 } 6712 6713 /** 6714 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 6715 * 6716 * @bp: driver handle 6717 */ 6718 static int bnx2x_init_hw_common_chip(struct bnx2x *bp) 6719 { 6720 int rc = bnx2x_init_hw_common(bp); 6721 6722 if (rc) 6723 return rc; 6724 6725 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 6726 if (!BP_NOMCP(bp)) 6727 bnx2x__common_init_phy(bp); 6728 6729 return 0; 6730 } 6731 6732 static int bnx2x_init_hw_port(struct bnx2x *bp) 6733 { 6734 int port = BP_PORT(bp); 6735 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 6736 u32 low, high; 6737 u32 val; 6738 6739 bnx2x__link_reset(bp); 6740 6741 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 6742 6743 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 6744 6745 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 6746 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 6747 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 6748 6749 /* Timers bug workaround: disables the pf_master bit in pglue at 6750 * common phase, we need to enable it here before any dmae access are 6751 * attempted. 
Therefore we manually added the enable-master to the 6752 * port phase (it also happens in the function phase) 6753 */ 6754 if (!CHIP_IS_E1x(bp)) 6755 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 6756 6757 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 6758 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 6759 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 6760 bnx2x_init_block(bp, BLOCK_QM, init_phase); 6761 6762 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 6763 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 6764 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 6765 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 6766 6767 /* QM cid (connection) count */ 6768 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 6769 6770 #ifdef BCM_CNIC 6771 bnx2x_init_block(bp, BLOCK_TM, init_phase); 6772 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 6773 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 6774 #endif 6775 6776 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 6777 6778 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 6779 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 6780 6781 if (IS_MF(bp)) 6782 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 6783 else if (bp->dev->mtu > 4096) { 6784 if (bp->flags & ONE_PORT_FLAG) 6785 low = 160; 6786 else { 6787 val = bp->dev->mtu; 6788 /* (24*1024 + val*4)/256 */ 6789 low = 96 + (val/64) + 6790 ((val % 64) ? 1 : 0); 6791 } 6792 } else 6793 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 6794 high = low + 56; /* 14*1024/256 */ 6795 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 6796 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 6797 } 6798 6799 if (CHIP_MODE_IS_4_PORT(bp)) 6800 REG_WR(bp, (BP_PORT(bp) ? 6801 BRB1_REG_MAC_GUARANTIED_1 : 6802 BRB1_REG_MAC_GUARANTIED_0), 40); 6803 6804 6805 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 6806 if (CHIP_IS_E3B0(bp)) { 6807 if (IS_MF_AFEX(bp)) { 6808 /* configure headers for AFEX mode */ 6809 REG_WR(bp, BP_PORT(bp) ? 6810 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 6811 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 6812 REG_WR(bp, BP_PORT(bp) ? 6813 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 6814 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 6815 REG_WR(bp, BP_PORT(bp) ? 6816 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 6817 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 6818 } else { 6819 /* Ovlan exists only if we are in multi-function + 6820 * switch-dependent mode, in switch-independent there 6821 * is no ovlan headers 6822 */ 6823 REG_WR(bp, BP_PORT(bp) ? 6824 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 6825 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 6826 (bp->path_has_ovlan ? 
7 : 6)); 6827 } 6828 } 6829 6830 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 6831 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 6832 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 6833 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 6834 6835 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 6836 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 6837 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 6838 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 6839 6840 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 6841 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 6842 6843 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 6844 6845 if (CHIP_IS_E1x(bp)) { 6846 /* configure PBF to work without PAUSE mtu 9000 */ 6847 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 6848 6849 /* update threshold */ 6850 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 6851 /* update init credit */ 6852 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 6853 6854 /* probe changes */ 6855 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 6856 udelay(50); 6857 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 6858 } 6859 6860 #ifdef BCM_CNIC 6861 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 6862 #endif 6863 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 6864 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 6865 6866 if (CHIP_IS_E1(bp)) { 6867 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 6868 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 6869 } 6870 bnx2x_init_block(bp, BLOCK_HC, init_phase); 6871 6872 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 6873 6874 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 6875 /* init aeu_mask_attn_func_0/1: 6876 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 6877 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 6878 * bits 4-7 are used for "per vn group attention" */ 6879 val = IS_MF(bp) ? 0xF7 : 0x7; 6880 /* Enable DCBX attention for all but E1 */ 6881 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 6882 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 6883 6884 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 6885 6886 if (!CHIP_IS_E1x(bp)) { 6887 /* Bit-map indicating which L2 hdrs may appear after the 6888 * basic Ethernet header 6889 */ 6890 if (IS_MF_AFEX(bp)) 6891 REG_WR(bp, BP_PORT(bp) ? 6892 NIG_REG_P1_HDRS_AFTER_BASIC : 6893 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 6894 else 6895 REG_WR(bp, BP_PORT(bp) ? 6896 NIG_REG_P1_HDRS_AFTER_BASIC : 6897 NIG_REG_P0_HDRS_AFTER_BASIC, 6898 IS_MF_SD(bp) ? 7 : 6); 6899 6900 if (CHIP_IS_E3(bp)) 6901 REG_WR(bp, BP_PORT(bp) ? 6902 NIG_REG_LLH1_MF_MODE : 6903 NIG_REG_LLH_MF_MODE, IS_MF(bp)); 6904 } 6905 if (!CHIP_IS_E3(bp)) 6906 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 6907 6908 if (!CHIP_IS_E1(bp)) { 6909 /* 0x2 disable mf_ov, 0x1 enable */ 6910 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 6911 (IS_MF_SD(bp) ? 0x1 : 0x2)); 6912 6913 if (!CHIP_IS_E1x(bp)) { 6914 val = 0; 6915 switch (bp->mf_mode) { 6916 case MULTI_FUNCTION_SD: 6917 val = 1; 6918 break; 6919 case MULTI_FUNCTION_SI: 6920 case MULTI_FUNCTION_AFEX: 6921 val = 2; 6922 break; 6923 } 6924 6925 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE : 6926 NIG_REG_LLH0_CLS_TYPE), val); 6927 } 6928 { 6929 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 6930 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 6931 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 6932 } 6933 } 6934 6935 6936 /* If SPIO5 is set to generate interrupts, enable it for this port */ 6937 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6938 if (val & (1 << MISC_REGISTERS_SPIO_5)) { 6939 u32 reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 6940 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 6941 val = REG_RD(bp, reg_addr); 6942 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 6943 REG_WR(bp, reg_addr, val); 6944 } 6945 6946 return 0; 6947 } 6948 6949 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6950 { 6951 int reg; 6952 u32 wb_write[2]; 6953 6954 if (CHIP_IS_E1(bp)) 6955 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 6956 else 6957 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 6958 6959 wb_write[0] = ONCHIP_ADDR1(addr); 6960 wb_write[1] = ONCHIP_ADDR2(addr); 6961 REG_WR_DMAE(bp, reg, wb_write, 2); 6962 } 6963 6964 static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, 6965 u8 idu_sb_id, bool is_Pf) 6966 { 6967 u32 data, ctl, cnt = 100; 6968 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 6969 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 6970 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 6971 u32 sb_bit = 1 << (idu_sb_id%32); 6972 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 6973 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 6974 6975 /* Not supported in BC mode */ 6976 if (CHIP_INT_MODE_IS_BC(bp)) 6977 return; 6978 6979 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 6980 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 6981 IGU_REGULAR_CLEANUP_SET | 6982 IGU_REGULAR_BCLEANUP; 6983 6984 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 6985 func_encode << IGU_CTRL_REG_FID_SHIFT | 6986 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 6987 6988 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 6989 data, igu_addr_data); 6990 REG_WR(bp, igu_addr_data, data); 6991 mmiowb(); 6992 barrier(); 6993 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 6994 ctl, igu_addr_ctl); 6995 REG_WR(bp, igu_addr_ctl, ctl); 6996 mmiowb(); 6997 barrier(); 6998 6999 /* wait for clean up to finish */ 7000 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 7001 msleep(20); 7002 7003 7004 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 7005 DP(NETIF_MSG_HW, 7006 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", 7007 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 7008 } 7009 } 7010 7011 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 7012 { 7013 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7014 } 7015 7016 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7017 { 7018 u32 i, base = FUNC_ILT_BASE(func); 7019 for (i = base; i < base + ILT_PER_FUNC; i++) 7020 bnx2x_ilt_wr(bp, i, 0); 7021 } 7022 7023 static int bnx2x_init_hw_func(struct bnx2x *bp) 7024 { 7025 int port = BP_PORT(bp); 7026 int func = BP_FUNC(bp); 7027 int init_phase = PHASE_PF0 + func; 7028 struct bnx2x_ilt *ilt = BP_ILT(bp); 7029 u16 cdu_ilt_start; 7030 u32 addr, val; 7031 u32 main_mem_base, main_mem_size, main_mem_prty_clr; 7032 int i, main_mem_width, rc; 7033 7034 DP(NETIF_MSG_HW, "starting func init func %d\n", func); 7035 7036 /* FLR cleanup - hmmm */ 7037 if (!CHIP_IS_E1x(bp)) { 7038 rc = bnx2x_pf_flr_clnup(bp); 7039 if (rc) 7040 return rc; 7041 } 7042 7043 /* set MSI reconfigure capability */ 7044 if (bp->common.int_block == INT_BLOCK_HC) { 7045 addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 7046 val = REG_RD(bp, addr); 7047 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 7048 REG_WR(bp, addr, val); 7049 } 7050 7051 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7052 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7053 7054 ilt = BP_ILT(bp); 7055 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7056 7057 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7058 ilt->lines[cdu_ilt_start + i].page = 7059 bp->context.vcxt + (ILT_PAGE_CIDS * i); 7060 ilt->lines[cdu_ilt_start + i].page_mapping = 7061 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); 7062 /* cdu ilt pages are allocated manually so there's no need to 7063 set the size */ 7064 } 7065 bnx2x_ilt_init_op(bp, INITOP_SET); 7066 7067 #ifdef BCM_CNIC 7068 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7069 7070 /* T1 hash bits value determines the T1 number of entries */ 7071 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7072 #endif 7073 7074 #ifndef BCM_CNIC 7075 /* set NIC mode */ 7076 REG_WR(bp, PRS_REG_NIC_MODE, 1); 7077 #endif /* BCM_CNIC */ 7078 7079 if (!CHIP_IS_E1x(bp)) { 7080 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 7081 7082 /* Turn on a single ISR mode in IGU if driver is going to use 7083 * INT#x or MSI 7084 */ 7085 if (!(bp->flags & USING_MSIX_FLAG)) 7086 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 7087 /* 7088 * Timers workaround bug: function init part. 7089 * Need to wait 20msec after initializing ILT, 7090 * needed to make sure there are no requests in 7091 * one of the PXP internal queues with "old" ILT addresses 7092 */ 7093 msleep(20); 7094 /* 7095 * Master enable - Due to WB DMAE writes performed before this 7096 * register is re-initialized as part of the regular function 7097 * init 7098 */ 7099 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7100 /* Enable the function in IGU */ 7101 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); 7102 } 7103 7104 bp->dmae_ready = 1; 7105 7106 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7107 7108 if (!CHIP_IS_E1x(bp)) 7109 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 7110 7111 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7112 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7113 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7114 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7115 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7116 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7117 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7118 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7119 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7120 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7121 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7122 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7123 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7124 7125 if (!CHIP_IS_E1x(bp)) 7126 REG_WR(bp, QM_REG_PF_EN, 1); 7127 7128 if (!CHIP_IS_E1x(bp)) { 7129 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7130 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7131 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7132 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7133 } 7134 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7135 7136 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7137 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7138 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7139 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7140 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7141 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7142 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7143 bnx2x_init_block(bp, 
BLOCK_XSDM, init_phase); 7144 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7145 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7146 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7147 if (!CHIP_IS_E1x(bp)) 7148 REG_WR(bp, PBF_REG_DISABLE_PF, 0); 7149 7150 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7151 7152 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7153 7154 if (!CHIP_IS_E1x(bp)) 7155 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); 7156 7157 if (IS_MF(bp)) { 7158 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7159 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); 7160 } 7161 7162 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7163 7164 /* HC init per function */ 7165 if (bp->common.int_block == INT_BLOCK_HC) { 7166 if (CHIP_IS_E1H(bp)) { 7167 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7168 7169 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7170 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7171 } 7172 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7173 7174 } else { 7175 int num_segs, sb_idx, prod_offset; 7176 7177 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7178 7179 if (!CHIP_IS_E1x(bp)) { 7180 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 7181 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 7182 } 7183 7184 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7185 7186 if (!CHIP_IS_E1x(bp)) { 7187 int dsb_idx = 0; 7188 /** 7189 * Producer memory: 7190 * E2 mode: address 0-135 match to the mapping memory; 7191 * 136 - PF0 default prod; 137 - PF1 default prod; 7192 * 138 - PF2 default prod; 139 - PF3 default prod; 7193 * 140 - PF0 attn prod; 141 - PF1 attn prod; 7194 * 142 - PF2 attn prod; 143 - PF3 attn prod; 7195 * 144-147 reserved. 7196 * 7197 * E1.5 mode - In backward compatible mode; 7198 * for non default SB; each even line in the memory 7199 * holds the U producer and each odd line hold 7200 * the C producer. The first 128 producers are for 7201 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 7202 * producers are for the DSB for each PF. 7203 * Each PF has five segments: (the order inside each 7204 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 7205 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 7206 * 144-147 attn prods; 7207 */ 7208 /* non-default-status-blocks */ 7209 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7210 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 7211 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { 7212 prod_offset = (bp->igu_base_sb + sb_idx) * 7213 num_segs; 7214 7215 for (i = 0; i < num_segs; i++) { 7216 addr = IGU_REG_PROD_CONS_MEMORY + 7217 (prod_offset + i) * 4; 7218 REG_WR(bp, addr, 0); 7219 } 7220 /* send consumer update with value 0 */ 7221 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, 7222 USTORM_ID, 0, IGU_INT_NOP, 1); 7223 bnx2x_igu_clear_sb(bp, 7224 bp->igu_base_sb + sb_idx); 7225 } 7226 7227 /* default-status-blocks */ 7228 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7229 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 7230 7231 if (CHIP_MODE_IS_4_PORT(bp)) 7232 dsb_idx = BP_FUNC(bp); 7233 else 7234 dsb_idx = BP_VN(bp); 7235 7236 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 
				IGU_BC_BASE_DSB_PROD + dsb_idx :
				IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * it does not matter what the current chip mode is
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Enable STORMs SP logging */
	REG_WR8(bp, BAR_USTRORM_INTMEM +
	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
#endif

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

void bnx2x_free_mem(struct bnx2x *bp)
{
	/* fastpath */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (!CHIP_IS_E1x(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
}

static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
	 * num of queues
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
		     (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
			num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 *
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 *
	 * memory for FCoE offloaded statistics is counted anyway,
	 * even if it will not be sent.
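	 *
	 * The request and data regions share one DMA allocation: the
	 * stats_query request (fw_stats_req_sz bytes) is placed first and
	 * the data region directly follows it, which is why the shortcut
	 * pointers below offset the base address by fw_stats_req_sz.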
7400 */ 7401 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + 7402 sizeof(struct per_pf_stats) + 7403 sizeof(struct fcoe_statistics_params) + 7404 sizeof(struct per_queue_stats) * num_queue_stats + 7405 sizeof(struct stats_counter); 7406 7407 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, 7408 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7409 7410 /* Set shortcuts */ 7411 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; 7412 bp->fw_stats_req_mapping = bp->fw_stats_mapping; 7413 7414 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) 7415 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); 7416 7417 bp->fw_stats_data_mapping = bp->fw_stats_mapping + 7418 bp->fw_stats_req_sz; 7419 return 0; 7420 7421 alloc_mem_err: 7422 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 7423 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7424 BNX2X_ERR("Can't allocate memory\n"); 7425 return -ENOMEM; 7426 } 7427 7428 7429 int bnx2x_alloc_mem(struct bnx2x *bp) 7430 { 7431 #ifdef BCM_CNIC 7432 if (!CHIP_IS_E1x(bp)) 7433 /* size = the status block + ramrod buffers */ 7434 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, 7435 sizeof(struct host_hc_status_block_e2)); 7436 else 7437 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, 7438 sizeof(struct host_hc_status_block_e1x)); 7439 7440 /* allocate searcher T2 table */ 7441 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7442 #endif 7443 7444 7445 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 7446 sizeof(struct host_sp_status_block)); 7447 7448 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 7449 sizeof(struct bnx2x_slowpath)); 7450 7451 #ifdef BCM_CNIC 7452 /* write address to which L5 should insert its values */ 7453 bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp; 7454 #endif 7455 7456 /* Allocated memory for FW statistics */ 7457 if (bnx2x_alloc_fw_stats_mem(bp)) 7458 goto alloc_mem_err; 7459 7460 bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 7461 7462 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, 7463 bp->context.size); 7464 7465 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); 7466 7467 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 7468 goto alloc_mem_err; 7469 7470 /* Slow path ring */ 7471 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 7472 7473 /* EQ */ 7474 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 7475 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7476 7477 7478 /* fastpath */ 7479 /* need to be done at the end, since it's self adjusting to amount 7480 * of memory available for RSS queues 7481 */ 7482 if (bnx2x_alloc_fp_mem(bp)) 7483 goto alloc_mem_err; 7484 return 0; 7485 7486 alloc_mem_err: 7487 bnx2x_free_mem(bp); 7488 BNX2X_ERR("Can't allocate memory\n"); 7489 return -ENOMEM; 7490 } 7491 7492 /* 7493 * Init service functions 7494 */ 7495 7496 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, 7497 struct bnx2x_vlan_mac_obj *obj, bool set, 7498 int mac_type, unsigned long *ramrod_flags) 7499 { 7500 int rc; 7501 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 7502 7503 memset(&ramrod_param, 0, sizeof(ramrod_param)); 7504 7505 /* Fill general parameters */ 7506 ramrod_param.vlan_mac_obj = obj; 7507 ramrod_param.ramrod_flags = *ramrod_flags; 7508 7509 /* Fill a user request section if needed */ 7510 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 7511 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 7512 7513 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 7514 7515 /* Set the command: ADD or DEL */ 
int bnx2x_alloc_mem(struct bnx2x *bp)
{
#ifdef BCM_CNIC
	if (!CHIP_IS_E1x(bp))
		/* size = the status block + ramrod buffers */
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	/* write address to which L5 should insert its values */
	bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
#endif

	/* Allocate memory for FW statistics */
	if (bnx2x_alloc_fw_stats_mem(bp))
		goto alloc_mem_err;

	bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);

	/* fastpath */
	/* needs to be done at the end, since it's self-adjusting to the
	 * amount of memory available for RSS queues
	 */
	if (bnx2x_alloc_fp_mem(bp))
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

/*
 * Init service functions
 */

int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
		      struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Fill general parameters */
	ramrod_param.vlan_mac_obj = obj;
	ramrod_param.ramrod_flags = *ramrod_flags;

	/* Fill a user request section if needed */
	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}

	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc < 0)
		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
	return rc;
}

int bnx2x_del_all_macs(struct bnx2x *bp,
		       struct bnx2x_vlan_mac_obj *mac_obj,
		       int mac_type, bool wait_for_comp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;

	/* Wait for completion of the requested commands if asked to */
	if (wait_for_comp)
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* Set the mac type of addresses we want to clear */
	__set_bit(mac_type, &vlan_mac_flags);

	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to delete MACs: %d\n", rc);

	return rc;
}
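/*
 * Editor's note -- a minimal usage sketch (assumed, not from the original
 * source) showing how the two helpers above combine: add one MAC with a
 * synchronous completion, then later flush every MAC of that type.
 */
#if 0
	unsigned long ramrod_flags = 0;
	int rc;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	rc = bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj,
			       true, BNX2X_ETH_MAC, &ramrod_flags);
	if (!rc)
		rc = bnx2x_del_all_macs(bp, &bp->fp->mac_obj,
					BNX2X_ETH_MAC, true);
#endif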
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
	unsigned long ramrod_flags = 0;

#ifdef BCM_CNIC
	if (is_zero_ether_addr(bp->dev->dev_addr) &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
		DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
		   "Ignoring Zero MAC for STORAGE SD mode\n");
		return 0;
	}
#endif

	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Eth MAC is set on the RSS leading client (fp[0]) */
	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
				 BNX2X_ETH_MAC, &ramrod_flags);
}

int bnx2x_setup_leading(struct bnx2x *bp)
{
	return bnx2x_setup_queue(bp, &bp->fp[0], 1);
}

/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */
static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	switch (int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* fall through */
	case INT_MODE_INTx:
		bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
		BNX2X_DEV_INFO("set number of queues to 1\n");
		break;
	default:
		/* Set number of queues for MSI-X mode */
		bnx2x_set_num_queues(bp);

		BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp) ||
		    bp->flags & USING_SINGLE_MSIX_FLAG) {
			/* failed to enable multiple MSI-X */
			BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
				       bp->num_queues, 1 + NON_ETH_CONTEXT_USE);

			bp->num_queues = 1 + NON_ETH_CONTEXT_USE;

			/* Try to enable MSI */
			if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
			    !(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}
		break;
	}
}
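/*
 * Editor's note (summary of the int_mode handling above): the default (0)
 * requests MSI-X with one queue per CPU and degrades to a single fastpath
 * with MSI or INTx; int_mode=1 forces INTx and int_mode=2 forces MSI, both
 * with a single queue. E.g. "modprobe bnx2x int_mode=2" would load with
 * MSI and one ETH queue.
 */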
/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}

void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += bnx2x_cid_ilt_lines(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(NETIF_MSG_IFUP,
	   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(NETIF_MSG_IFUP,
	   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
	BUG_ON(line > ILT_MAX_LINES);
}
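/*
 * Editor's note (summary of the accounting above): ILT lines are handed
 * out consecutively from the function's base line -- CDU first (L2 cids,
 * plus CNIC_ILT_LINES under BCM_CNIC), then QM
 * (DIV_ROUND_UP(qm_cid_count * QM_QUEUES_PER_FUNC * 4, QM_ILT_PAGE_SZ)
 * lines when QM_INIT() is true), then SRC and TM (BCM_CNIC only).
 * The BUG_ON above asserts that the running total never exceeds
 * ILT_MAX_LINES.
 */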
/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 * - HC configuration
 * - Queue's CDU context
 */
static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
{
	u8 cos;

	/* FCoE Queue uses Default SB, thus has no HC capabilities */
	if (!IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);

		/* If HC is supported, enable host coalescing in the
		 * transition to INIT state.
		 */
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);

		/* HC rate */
		init_params->rx.hc_rate = bp->rx_ticks ?
			(1000000 / bp->rx_ticks) : 0;
		init_params->tx.hc_rate = bp->tx_ticks ?
			(1000000 / bp->tx_ticks) : 0;

		/* FW SB ID */
		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
			fp->fw_sb_id;

		/*
		 * CQ index among the SB indices: the FCoE client uses the
		 * default SB, therefore it's different.
		 */
		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
	}

	/* set maximum number of COSs supported by this queue */
	init_params->max_cos = fp->max_cos;

	DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
	   fp->index, init_params->max_cos);

	/* set the context pointers queue object */
	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
		init_params->cxts[cos] =
			&bp->context.vcxt[fp->txdata[cos].cid].eth;
}

int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			struct bnx2x_queue_state_params *q_params,
			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
			int tx_index, bool leading)
{
	memset(tx_only_params, 0, sizeof(*tx_only_params));

	/* Set the command */
	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;

	/* Set tx-only QUEUE flags: don't zero statistics */
	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);

	/* choose the index of the cid to send the slow path on */
	tx_only_params->cid_index = tx_index;

	/* Set general TX_ONLY_SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);

	/* Set Tx TX_ONLY_SETUP parameters */
	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);

	DP(NETIF_MSG_IFUP,
	   "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);

	/* send the ramrod */
	return bnx2x_queue_state_change(bp, q_params);
}
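/*
 * Editor's note (example values, not from the source): hc_rate above is
 * an events-per-second figure derived from the coalescing interval in
 * microseconds, e.g. rx_ticks == 25 yields 1000000 / 25 == 40000
 * interrupts/sec, while rx_ticks == 0 disables coalescing (hc_rate == 0).
 */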
/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs two steps in a Queue state machine:
 *	1) RESET->INIT  2) INIT->SETUP
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_queue_setup_params *setup_params =
						&q_params.params.setup;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
						&q_params.params.tx_only;
	int rc;
	u8 tx_index;

	DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);

	/* reset IGU state; skip the FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	q_params.q_obj = &fp->q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Prepare the INIT parameters */
	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_INIT;

	/* Change the state to INIT */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "init complete\n");

	/* Now move the Queue to the SETUP state... */
	memset(setup_params, 0, sizeof(*setup_params));

	/* Set QUEUE flags */
	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);

	/* Set general SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
			   &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
			   FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_SETUP;

	/* Change the state to SETUP */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
		return rc;
	}

	/* loop through the relevant tx-only indices */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++) {

		/* prepare and send tx-only ramrod */
		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
					 tx_only_params, tx_index, leading);
		if (rc) {
			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
				  fp->index, tx_index);
			return rc;
		}
	}

	return rc;
}

static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct bnx2x_fp_txdata *txdata;
	struct bnx2x_queue_state_params q_params = {NULL};
	int rc, tx_index;

	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);

	q_params.q_obj = &fp->q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* close tx-only connections */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++) {

		/* ascertain this is a normal queue */
		txdata = &fp->txdata[tx_index];

		DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
		   txdata->txq_index);

		/* send halt terminate on tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
		memset(&q_params.params.terminate, 0,
		       sizeof(q_params.params.terminate));
		q_params.params.terminate.cid_index = tx_index;

		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;

		/* send cfc del ramrod on tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
		memset(&q_params.params.cfc_del, 0,
		       sizeof(q_params.params.cfc_del));
		q_params.params.cfc_del.cid_index = tx_index;
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;
	}
	/* Stop the primary connection: */
	/* ...halt the connection */
	q_params.cmd = BNX2X_Q_CMD_HALT;
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc)
		return rc;

	/* ...terminate the connection */
	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
	memset(&q_params.params.terminate, 0,
	       sizeof(q_params.params.terminate));
	q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc)
		return rc;
	/* ...delete cfc entry */
	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
	memset(&q_params.params.cfc_del, 0,
	       sizeof(q_params.params.cfc_del));
	q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
	return bnx2x_queue_state_change(bp, &q_params);
}
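/*
 * Editor's note (summary, mirrors the code above): bnx2x_setup_queue()
 * and bnx2x_stop_queue() walk the queue state machine in opposite
 * directions:
 *
 *   setup: RESET -> INIT -> SETUP, then one SETUP_TX_ONLY per extra COS
 *   stop : per tx-only COS: TERMINATE -> CFC_DEL, then on the primary
 *          connection HALT -> TERMINATE -> CFC_DEL
 */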
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
			SB_DISABLED);
	}

#ifdef BCM_CNIC
	/* CNIC SB */
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
		CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
		SB_DISABLED);
#endif
	/* SP SB */
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
		SB_DISABLED);

	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timer workaround for E2: if this is vnic-3,
	 * we need to set the entire ILT range for these timers.
	 */
	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() was called before reset_func() */
	if (!CHIP_IS_E1x(bp))
		bnx2x_pf_disable(bp);

	bp->dmae_ready = 0;
}
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* Reset physical Link */
	bnx2x__link_reset(bp);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty, %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_RESET;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}

static int bnx2x_func_stop(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	int rc;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_STOP;

	/*
	 * Try to stop the function the 'good way'. If it fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transactions.
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc) {
#ifdef BNX2X_STOP_ON_ERROR
		return rc;
#else
		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}
/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
{
	u32 reset_code = 0;
	int port = BP_PORT(bp);

	/* Select the UNLOAD request mode */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		u16 pmc;

		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
		u8 entry = (BP_VN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		/* Enable the PME and clear the status */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Send the request to the MCP */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]--;
		load_count[path][1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[path][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	return reset_code;
}
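/*
 * Editor's note -- a worked example (address assumed) of the MAC-match
 * packing used above for the WoL case: a 6-byte MAC such as
 * 00:10:18:ab:cd:ef is split across two 32-bit register values.
 */
#if 0
	u8 mac[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	u32 hi = (mac[0] << 8) | mac[1];	/* 0x00000010 */
	u32 lo = (mac[2] << 24) | (mac[3] << 16) |
		 (mac[4] << 8) | mac[5];	/* 0x18abcdef */
#endif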
/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp)
{
	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}

static int bnx2x_func_wait_started(struct bnx2x *bp)
{
	int tout = 50;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (!bp->port.pmf)
		return 0;

	/*
	 * (assumption: No Attention from MCP at this stage)
	 * The PMF is probably in the middle of a TX disable/enable transaction
	 * 1. Sync IRQ for the default SB
	 * 2. Sync SP queue - this guarantees us that attention handling started
	 * 3. Wait until the TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if a DCBx attention was scheduled it already
	 * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
	 * if we already received the completion for the transaction the
	 * state is TX_STOPPED.
	 * The state will return to STARTED after the completion of the
	 * TX_STOPPED-->STARTED transition.
	 */

	/* make sure default SB ISR is done */
	if (msix)
		synchronize_irq(bp->msix_table[0].vector);
	else
		synchronize_irq(bp->pdev->irq);

	flush_workqueue(bnx2x_wq);

	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
	       BNX2X_F_STATE_STARTED && tout--)
		msleep(20);

	if (bnx2x_func_get_state(bp, &bp->func_obj) !=
	    BNX2X_F_STATE_STARTED) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("Wrong function state\n");
		return -EBUSY;
#else
		/*
		 * Failed to complete the transaction in a "good way"
		 * Force both transactions with CLR bit
		 */
		struct bnx2x_func_state_params func_params = {NULL};

		DP(NETIF_MSG_IFDOWN,
		   "Hmmm... unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");

		func_params.f_obj = &bp->func_obj;
		__set_bit(RAMROD_DRV_CLR_ONLY,
			  &func_params.ramrod_flags);

		/* STARTED-->TX_STOPPED */
		func_params.cmd = BNX2X_F_CMD_TX_STOP;
		bnx2x_func_state_change(bp, &func_params);

		/* TX_STOPPED-->STARTED */
		func_params.cmd = BNX2X_F_CMD_TX_START;
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0;
	u8 cos;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	u32 reset_code;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
#ifdef BNX2X_STOP_ON_ERROR
		if (rc)
			return;
#endif
	}

	/* Give HW time to discard old tx messages */
	usleep_range(1000, 1000);

	/* Clean all ETH MACs */
	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
	if (rc < 0)
		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);

	/* Clean up UC list */
	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
				true);
	if (rc < 0)
		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
			  rc);

	/* Disable LLH */
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	/* Set "drop all" (stop Rx).
	 * We need to take a netif_addr_lock() here in order to prevent
	 * a race between the completion code and this code.
	 */
	netif_addr_lock_bh(bp->dev);
	/* Schedule the rx_mode command */
	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
	else
		bnx2x_set_storm_rx_mode(bp);

	/* Cleanup multicast configuration */
	rparam.mcast_obj = &bp->mcast_obj;
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);

	netif_addr_unlock_bh(bp->dev);

	/*
	 * Send the UNLOAD_REQUEST to the MCP. This will return if
	 * this function should perform FUNC, PORT or COMMON HW
	 * reset.
	 */
	reset_code = bnx2x_send_unload_req(bp, unload_mode);

	/*
	 * (assumption: No Attention from MCP at this stage)
	 * The PMF is probably in the middle of a TX disable/enable transaction
	 */
	rc = bnx2x_func_wait_started(bp);
	if (rc) {
		BNX2X_ERR("bnx2x_func_wait_started failed\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#endif
	}

	/* Close multi and leading connections
	 * Completions for ramrods are collected in a synchronous way
	 */
	for_each_queue(bp, i)
		if (bnx2x_stop_queue(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
			return;
#else
			goto unload_error;
#endif
	/* If SP settings didn't get completed so far - something
	 * very wrong has happened.
	 */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
		BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");

#ifndef BNX2X_STOP_ON_ERROR
unload_error:
#endif
	rc = bnx2x_func_stop(bp);
	if (rc) {
		BNX2X_ERR("Function stop failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#endif
	}

	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Reset the chip */
	rc = bnx2x_reset_hw(bp, reset_code);
	if (rc)
		BNX2X_ERR("HW_RESET failed\n");

	/* Report UNLOAD_DONE to MCP */
	bnx2x_send_unload_done(bp);
}

void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				  MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
		/* #2 */
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
	}

	/* #3 */
	if (CHIP_IS_E1x(bp)) {
		/* Prevent interrupts from HC on both ports */
		val = REG_RD(bp, HC_REG_CONFIG_1);
		REG_WR(bp, HC_REG_CONFIG_1,
		       (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
		       (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

		val = REG_RD(bp, HC_REG_CONFIG_0);
		REG_WR(bp, HC_REG_CONFIG_0,
		       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
		       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
	} else {
		/* Prevent incoming interrupts in IGU */
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

		REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
		       (!close) ?
		       (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
		       (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
	}

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/**
 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of 'magic' bit.
 *
 * Takes care of CLP configurations.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000	/* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100	/* 100 ms */

/**
 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 *
 * @bp:	driver handle
 */
static void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

/*
 * initializes bp->common.shmem_base and waits for validity signature to appear
 */
static int bnx2x_init_shmem(struct bnx2x *bp)
{
	int cnt = 0;
	u32 val = 0;

	do {
		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
		if (bp->common.shmem_base) {
			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
			if (val & SHR_MEM_VALIDITY_MB)
				return 0;
		}

		bnx2x_mcp_wait_one(bp);

	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

	BNX2X_ERR("BAD MCP validity signature\n");

	return -ENODEV;
}

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	int rc = bnx2x_init_shmem(bp);

	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
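/*
 * Editor's note -- illustrative pairing of the helpers above (this is how
 * bnx2x_process_kill() below uses them around a global reset):
 */
#if 0
	u32 magic;

	bnx2x_reset_mcp_prep(bp, &magic); /* save `magic', clear validity map */
	/* chip reset is issued here */
	if (bnx2x_reset_mcp_comp(bp, magic)) /* wait for shmem, restore bit */
		return -EAGAIN;
#endif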
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		mmiowb();
	}
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
	u32 global_bits2, stay_reset2;

	/*
	 * Bits that have to be set in reset_mask2 if we want to reset 'global'
	 * (per chip) blocks.
	 */
	global_bits2 =
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

	/* Don't reset the following blocks */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
		MISC_REGISTERS_RESET_REG_2_RST_ATC |
		MISC_REGISTERS_RESET_REG_2_PGLC;

	/*
	 * Keep the following blocks in reset:
	 *  - all xxMACs are handled by the bnx2x_link code.
	 */
	stay_reset2 =
		MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
		MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
		MISC_REGISTERS_RESET_REG_2_UMAC0 |
		MISC_REGISTERS_RESET_REG_2_UMAC1 |
		MISC_REGISTERS_RESET_REG_2_XMAC |
		MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

	/* Full reset masks according to the chip */
	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else if (CHIP_IS_E1H(bp))
		reset_mask2 = 0x1ffff;
	else if (CHIP_IS_E2(bp))
		reset_mask2 = 0xfffff;
	else /* CHIP_IS_E3 */
		reset_mask2 = 0x3ffffff;

	/* Don't reset global blocks unless we need to */
	if (!global)
		reset_mask2 &= ~global_bits2;

	/*
	 * In case of attention in the QM, we need to reset PXP
	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM,
	 * because otherwise QM reset would release 'close the gates' shortly
	 * before resetting the PXP, then the PSWRQ would send a write
	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
	 * read the payload data from PSWWR, but PSWWR would not
	 * respond. The write queue in PGLUE would get stuck, and DMAE
	 * commands would not return. Therefore it's important to reset
	 * the second reset register (containing the
	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
	 * bit).
	 */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       reset_mask2 & (~stay_reset2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	mmiowb();
}
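/*
 * Editor's note (worked example derived from the code above): on an E2
 * chip with global == false, reset_mask2 = 0xfffff & ~global_bits2, so
 * the MCP CPU/core stay out of the reset. The CLEAR write then asserts
 * reset for reset_mask2 & ~not_reset_mask2, and the SET write releases
 * reset_mask2 & ~stay_reset2, leaving the MAC blocks in reset for the
 * link code to bring back later.
 */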
/**
 * bnx2x_er_poll_igu_vq - poll for the IGU pending-writes bit.
 *
 * @bp:	driver handle
 *
 * It should get cleared in no more than 1s. Returns 0 if the
 * pending writes bit gets cleared.
 */
static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 pend_bits = 0;

	do {
		pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);

		if (pend_bits == 0)
			break;

		usleep_range(1000, 1000);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
			  pend_bits);
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_process_kill(struct bnx2x *bp, bool global)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		usleep_range(1000, 1000);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
		BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* Poll for IGU VQs for 57712 and newer chips */
	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
		return -EAGAIN;

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	usleep_range(1000, 1000);

	/* Prepare for chip reset: */
	/* MCP */
	if (global)
		bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp, global);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (global && bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* TBD: Add resetting the NO_MCP mode DB here */

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions.
	 */

	return 0;
}
int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	bool global = bnx2x_reset_is_global(bp);
	u32 load_code;

	/* if not going to reset MCP - load "fake" driver to reset HW while
	 * driver is owner of the HW
	 */
	if (!global && !BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset;
		}
		if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
		    (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
			BNX2X_ERR("MCP unexpected resp, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset2;
		}
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset2;
		}
	}

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
			  BP_PATH(bp));
		rc = -EAGAIN;
		goto exit_leader_reset2;
	}

	/*
	 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the
	 * driver state.
	 */
	bnx2x_set_reset_done(bp);
	if (global)
		bnx2x_clear_reset_global(bp);

exit_leader_reset2:
	/* unload "fake driver" if it was loaded */
	if (!global && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}
exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_leader_lock(bp);
	smp_mb();
	return rc;
}

static void bnx2x_recovery_failed(struct bnx2x *bp)
{
	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");

	/* Disconnect this device */
	netif_device_detach(bp->dev);

	/*
	 * Block ifup for all functions on this engine until "process kill"
	 * or power cycle.
	 */
	bnx2x_set_reset_in_progress(bp);

	/* Shut down the power */
	bnx2x_set_power_state(bp, PCI_D3hot);

	bp->recovery_state = BNX2X_RECOVERY_FAILED;

	smp_mb();
}
/*
 * Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_sp_rtnl() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	bool global = false;
	u32 error_recovered, error_unrecovered;
	bool is_parity;

	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			is_parity = bnx2x_chk_parity_attn(bp, &global, false);
			WARN_ON(!is_parity);

			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_leader_lock(bp)) {
				bnx2x_set_reset_in_progress(bp);
				/*
				 * Check if there is a global attention and if
				 * there was a global attention, set the global
				 * reset bit.
				 */
				if (global)
					bnx2x_set_reset_global(bp);

				bp->is_leader = 1;
			}

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;

			/* Ensure "is_leader", MCP command sequence and
			 * "recovery_state" update values are seen on other
			 * CPUs.
			 */
			smp_mb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				int other_engine = BP_PATH(bp) ? 0 : 1;
				bool other_load_status =
					bnx2x_get_load_status(bp, other_engine);
				bool load_status =
					bnx2x_get_load_status(bp, BP_PATH(bp));
				global = bnx2x_reset_is_global(bp);

				/*
				 * In case of a parity in a global block, let
				 * the first leader that performs a
				 * leader_reset() reset the global blocks in
				 * order to clear global attentions. Otherwise
				 * the gates will remain closed for that
				 * engine.
				 */
				if (load_status ||
				    (global && other_load_status)) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp)) {
						bnx2x_recovery_failed(bp);
						return;
					}

					/* If we are here, means that the
					 * leader has succeeded and doesn't
					 * want to be a leader any more. Try
					 * to continue as a non-leader.
					 */
					break;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_leader_lock(bp)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;

				} else {
					/*
					 * If there was a global attention, wait
					 * for it to be cleared.
					 */
					if (bnx2x_reset_is_global(bp)) {
						schedule_delayed_work(
							&bp->sp_rtnl_task,
							HZ/10);
						return;
					}

					error_recovered =
					  bp->eth_stats.recoverable_error;
					error_unrecovered =
					  bp->eth_stats.unrecoverable_error;
					bp->recovery_state =
						BNX2X_RECOVERY_NIC_LOADING;
					if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
						error_unrecovered++;
						netdev_err(bp->dev,
							   "Recovery failed. Power cycle needed\n");
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Shut down the power */
						bnx2x_set_power_state(
							bp, PCI_D3hot);
						smp_mb();
					} else {
						bp->recovery_state =
							BNX2X_RECOVERY_DONE;
						error_recovered++;
						smp_mb();
					}
					bp->eth_stats.recoverable_error =
						error_recovered;
					bp->eth_stats.unrecoverable_error =
						error_unrecovered;

					return;
				}
			}
		default:
			return;
		}
	}
}

static int bnx2x_close(struct net_device *dev);

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto sp_rtnl_exit;

	/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
		  "you will need to reboot when done\n");
	goto sp_rtnl_not_reset;
#endif

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		bnx2x_parity_recover(bp);

		goto sp_rtnl_exit;
	}

	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);

		goto sp_rtnl_exit;
	}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
#endif
	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
		bnx2x_after_function_update(bp);
	/*
	 * In case of fan failure we need to shut the device down, since we
	 * are trying to prevent permanent overheating damage; this is done
	 * even when the "stop on error" debug flag is set.
	 */
	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
		DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
		netif_device_detach(bp->dev);
		bnx2x_close(bp->dev);
	}

sp_rtnl_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

static void bnx2x_period_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);

	if (!netif_running(bp->dev))
		goto period_task_exit;

	if (CHIP_REV_IS_SLOW(bp)) {
		BNX2X_ERR("period task called on emulation, ignoring\n");
		goto period_task_exit;
	}

	bnx2x_acquire_phy_lock(bp);
	/*
	 * The barrier is needed to ensure the ordering between the writing to
	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
	 * the reading here.
	 */
	smp_mb();
	if (bp->port.pmf) {
		bnx2x_period_func(&bp->link_params, &bp->link_vars);

		/* Re-queue task in 1 sec */
		queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
	}

	bnx2x_release_phy_lock(bp);
period_task_exit:
	return;
}
/*
 * Init service functions
 */

static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
	return base + (BP_ABS_FUNC(bp)) * stride;
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now on we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp);
}
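/*
 * Editor's note -- illustrative sketch (mirrors the helper above) of the
 * pretend-register pattern: temporarily claim to be absolute function 0
 * for a GRC access, then restore the real function id.
 */
#if 0
	u32 reg = bnx2x_get_pretend_reg(bp);

	REG_WR(bp, reg, 0);		  /* pretend to be function 0 */
	REG_RD(bp, reg);		  /* flush the GRC transaction */
	/* perform the access as function 0 here */
	REG_WR(bp, reg, BP_ABS_FUNC(bp)); /* restore our own function */
	REG_RD(bp, reg);
#endif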
static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
{
	u32 val, base_addr, offset, mask, reset_reg;
	bool mac_stopped = false;
	u8 port = BP_PORT(bp);

	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);

	if (!CHIP_IS_E3(bp)) {
		val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
		mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
		if ((mask & reset_reg) && val) {
			u32 wb_data[2];
			BNX2X_DEV_INFO("Disable bmac Rx\n");
			base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
						: NIG_REG_INGRESS_BMAC0_MEM;
			offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
						: BIGMAC_REGISTER_BMAC_CONTROL;

			/*
			 * use rd/wr since we cannot use dmae. This is safe
			 * since MCP won't access the bus due to the request
			 * to unload, and no function on the path can be
			 * loaded at this time.
			 */
			wb_data[0] = REG_RD(bp, base_addr + offset);
			wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
			REG_WR(bp, base_addr + offset, wb_data[0]);
			REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);

		}
		BNX2X_DEV_INFO("Disable emac Rx\n");
		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);

		mac_stopped = true;
	} else {
		if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
			BNX2X_DEV_INFO("Disable xmac Rx\n");
			base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
			val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
			       val & ~(1 << 1));
			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
			       val | (1 << 1));
			REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
			mac_stopped = true;
		}
		mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
		if (mask & reset_reg) {
			BNX2X_DEV_INFO("Disable umac Rx\n");
			base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
			REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
			mac_stopped = true;
		}
	}

	if (mac_stopped)
		msleep(20);
}

#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
#define BNX2X_PREV_UNDI_RCQ(val)	((val) & 0xffff)
#define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq))

static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
						 u8 inc)
{
	u16 rcq, bd;
	u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));

	rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
	bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;

	tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
	REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);

	BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
		       port, bd, rcq);
}

static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
{
	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	if (!rc) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	return 0;
}

static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
	struct bnx2x_prev_path_list *tmp_list;
	bool rc = false;

	if (down_trylock(&bnx2x_prev_sem))
		return false;

	list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
		if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
		    bp->pdev->bus->number == tmp_list->bus &&
		    BP_PATH(bp) == tmp_list->path) {
			rc = true;
			BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
				       BP_PATH(bp));
			break;
		}
	}

	up(&bnx2x_prev_sem);

	return rc;
}

static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
{
	struct bnx2x_prev_path_list *tmp_list;
	int rc;

	tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
	if (!tmp_list) {
		BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
		return -ENOMEM;
	}

	tmp_list->bus = bp->pdev->bus->number;
	tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
	tmp_list->path = BP_PATH(bp);

	rc = down_interruptible(&bnx2x_prev_sem);
	if (rc) {
		BNX2X_ERR("Received %d when tried to take lock\n", rc);
		kfree(tmp_list);
	} else {
		BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
			       BP_PATH(bp));
		list_add(&tmp_list->list, &bnx2x_prev_list);
		up(&bnx2x_prev_sem);
	}

	return rc;
}

static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
{
	int pos;
	u32 cap;
	struct pci_dev *dev = bp->pdev;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return false;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return false;

	return true;
}

static int __devinit bnx2x_do_flr(struct bnx2x *bp)
{
	int i, pos;
	u16 status;
	struct pci_dev *dev = bp->pdev;

	/* probe the capability first; bail out if FLR is not supported
	 * (the original check was missing the negation here)
	 */
	if (!bnx2x_can_flr(bp))
		return -ENOTTY;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev,
		"transaction is not cleared; proceeding with reset anyway\n");

clear:
	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
			  bp->common.bc_ver);
		return -EINVAL;
	}

	bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);

	return 0;
}
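/*
 * Editor's note (worked example of the backoff above): the Transaction
 * Pending loop sleeps 0, 100, 200 and then 400 ms before its four reads,
 * i.e. up to ~700 ms total before it gives up and proceeds anyway.
 */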
static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
{
	int rc;

	BNX2X_DEV_INFO("Uncommon unload Flow\n");

	/* Test if previous unload process was already finished for this path */
	if (bnx2x_prev_is_path_marked(bp))
		return bnx2x_prev_mcp_done(bp);

	/* If function has FLR capabilities, and existing FW version matches
	 * the one required, then FLR will be sufficient to clean any residue
	 * left by previous driver
	 */
	if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
		return bnx2x_do_flr(bp);

	/* Close the MCP request, return failure */
	rc = bnx2x_prev_mcp_done(bp);
	if (!rc)
		rc = BNX2X_PREV_WAIT_NEEDED;

	return rc;
}

static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
{
	u32 reset_reg, tmp_reg = 0, rc;
	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */
	BNX2X_DEV_INFO("Common unload Flow\n");

	if (bnx2x_prev_is_path_marked(bp))
		return bnx2x_prev_mcp_done(bp);

	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);

	/* Reset should be performed after BRB is emptied */
	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
		u32 timer_count = 1000;
		bool prev_undi = false;

		/* Close the MAC Rx to prevent BRB from filling up */
		bnx2x_prev_unload_close_mac(bp);

		/* Check if the UNDI driver was previously loaded
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
		if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
			tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
			if (tmp_reg == 0x7) {
				BNX2X_DEV_INFO("UNDI previously loaded\n");
				prev_undi = true;
				/* clear the UNDI indication */
				REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
			}
		}
		/* wait until BRB is empty */
		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
		while (timer_count) {
			u32 prev_brb = tmp_reg;

			tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
			if (!tmp_reg)
				break;

			BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);

			/* reset timer as long as BRB actually gets emptied */
			if (prev_brb > tmp_reg)
				timer_count = 1000;
			else
				timer_count--;

			/* If UNDI resides in memory, manually increment it */
			if (prev_undi)
				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);

			udelay(10);
		}

		if (!timer_count)
			BNX2X_ERR("Failed to empty BRB, hope for the best\n");

	}

	/* No packets are in the pipeline, path is ready for reset */
	bnx2x_reset_common(bp);

	rc = bnx2x_prev_mark_path(bp);
	if (rc) {
		bnx2x_prev_mcp_done(bp);
		return rc;
	}

	return bnx2x_prev_mcp_done(bp);
}
time_counter = 10, rc = 0; 9491 u32 fw, hw_lock_reg, hw_lock_val; 9492 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 9493 9494 /* Release previously held locks */ 9495 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 9496 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 9497 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 9498 9499 hw_lock_val = REG_RD(bp, hw_lock_reg); 9500 if (hw_lock_val) { 9501 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 9502 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); 9503 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 9504 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); 9505 } 9506 9507 BNX2X_DEV_INFO("Release Previously held hw lock\n"); 9508 REG_WR(bp, hw_lock_reg, 0xffffffff); 9509 } else { 9510 BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); 9511 } 9512 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { 9513 BNX2X_DEV_INFO("Release previously held alr\n"); 9514 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 9515 } 9516 9517 9518 do { 9519 /* Lock MCP using an unload request */ 9520 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 9521 if (!fw) { 9522 BNX2X_ERR("MCP response failure, aborting\n"); 9523 rc = -EBUSY; 9524 break; 9525 } 9526 9527 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 9528 rc = bnx2x_prev_unload_common(bp); 9529 break; 9530 } 9531 9532 /* a non-common reply from the MCP might require looping */ 9533 rc = bnx2x_prev_unload_uncommon(bp); 9534 if (rc != BNX2X_PREV_WAIT_NEEDED) 9535 break; 9536 9537 msleep(20); 9538 } while (--time_counter); 9539 9540 if (!time_counter || rc) { 9541 BNX2X_ERR("Failed unloading previous driver, aborting\n"); 9542 rc = -EBUSY; 9543 } 9544 9545 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 9546 9547 return rc; 9548 } 9549 9550 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 9551 { 9552 u32 val, val2, val3, val4, id, boot_mode; 9553 u16 pmc; 9554 9555 /* Get the chip revision id and number. */ 9556 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 9557 val = REG_RD(bp, MISC_REG_CHIP_NUM); 9558 id = ((val & 0xffff) << 16); 9559 val = REG_RD(bp, MISC_REG_CHIP_REV); 9560 id |= ((val & 0xf) << 12); 9561 val = REG_RD(bp, MISC_REG_CHIP_METAL); 9562 id |= ((val & 0xff) << 4); 9563 val = REG_RD(bp, MISC_REG_BOND_ID); 9564 id |= (val & 0xf); 9565 bp->common.chip_id = id; 9566 9567 /* force 57811 according to MISC register */ 9568 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 9569 if (CHIP_IS_57810(bp)) 9570 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 9571 (bp->common.chip_id & 0x0000FFFF); 9572 else if (CHIP_IS_57810_MF(bp)) 9573 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 9574 (bp->common.chip_id & 0x0000FFFF); 9575 bp->common.chip_id |= 0x1; 9576 } 9577 9578 /* Set doorbell size */ 9579 bp->db_size = (1 << BNX2X_DB_SHIFT); 9580 9581 if (!CHIP_IS_E1x(bp)) { 9582 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 9583 if ((val & 1) == 0) 9584 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 9585 else 9586 val = (val >> 1) & 1; 9587 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 9588 "2_PORT_MODE"); 9589 bp->common.chip_port_mode = val ?
CHIP_4_PORT_MODE : 9590 CHIP_2_PORT_MODE; 9591 9592 if (CHIP_MODE_IS_4_PORT(bp)) 9593 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 9594 else 9595 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 9596 } else { 9597 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 9598 bp->pfid = bp->pf_num; /* 0..7 */ 9599 } 9600 9601 BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid); 9602 9603 bp->link_params.chip_id = bp->common.chip_id; 9604 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 9605 9606 val = (REG_RD(bp, 0x2874) & 0x55); 9607 if ((bp->common.chip_id & 0x1) || 9608 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 9609 bp->flags |= ONE_PORT_FLAG; 9610 BNX2X_DEV_INFO("single port device\n"); 9611 } 9612 9613 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 9614 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 9615 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 9616 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 9617 bp->common.flash_size, bp->common.flash_size); 9618 9619 bnx2x_init_shmem(bp); 9620 9621 9622 9623 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 9624 MISC_REG_GENERIC_CR_1 : 9625 MISC_REG_GENERIC_CR_0)); 9626 9627 bp->link_params.shmem_base = bp->common.shmem_base; 9628 bp->link_params.shmem2_base = bp->common.shmem2_base; 9629 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 9630 bp->common.shmem_base, bp->common.shmem2_base); 9631 9632 if (!bp->common.shmem_base) { 9633 BNX2X_DEV_INFO("MCP not active\n"); 9634 bp->flags |= NO_MCP_FLAG; 9635 return; 9636 } 9637 9638 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 9639 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 9640 9641 bp->link_params.hw_led_mode = ((bp->common.hw_config & 9642 SHARED_HW_CFG_LED_MODE_MASK) >> 9643 SHARED_HW_CFG_LED_MODE_SHIFT); 9644 9645 bp->link_params.feature_config_flags = 0; 9646 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 9647 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 9648 bp->link_params.feature_config_flags |= 9649 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 9650 else 9651 bp->link_params.feature_config_flags &= 9652 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 9653 9654 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 9655 bp->common.bc_ver = val; 9656 BNX2X_DEV_INFO("bc_ver %X\n", val); 9657 if (val < BNX2X_BC_VER) { 9658 /* for now only warn; 9659 * later we might need to enforce this */ 9660 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 9661 BNX2X_BC_VER, val); 9662 } 9663 bp->link_params.feature_config_flags |= 9664 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 9665 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 9666 9667 bp->link_params.feature_config_flags |= 9668 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 9669 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 9670 bp->link_params.feature_config_flags |= 9671 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 9672 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 9673 bp->link_params.feature_config_flags |= 9674 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 9675 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 9676 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
9677 BC_SUPPORTS_PFC_STATS : 0; 9678 9679 boot_mode = SHMEM_RD(bp, 9680 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 9681 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 9682 switch (boot_mode) { 9683 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: 9684 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; 9685 break; 9686 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: 9687 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; 9688 break; 9689 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: 9690 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; 9691 break; 9692 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: 9693 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; 9694 break; 9695 } 9696 9697 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 9698 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 9699 9700 BNX2X_DEV_INFO("%sWoL capable\n", 9701 (bp->flags & NO_WOL_FLAG) ? "not " : ""); 9702 9703 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 9704 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 9705 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 9706 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 9707 9708 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", 9709 val, val2, val3, val4); 9710 } 9711 9712 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 9713 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 9714 9715 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 9716 { 9717 int pfid = BP_FUNC(bp); 9718 int igu_sb_id; 9719 u32 val; 9720 u8 fid, igu_sb_cnt = 0; 9721 9722 bp->igu_base_sb = 0xff; 9723 if (CHIP_INT_MODE_IS_BC(bp)) { 9724 int vn = BP_VN(bp); 9725 igu_sb_cnt = bp->igu_sb_cnt; 9726 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 9727 FP_SB_MAX_E1x; 9728 9729 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + 9730 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); 9731 9732 return; 9733 } 9734 9735 /* IGU in normal mode - read CAM */ 9736 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 9737 igu_sb_id++) { 9738 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 9739 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 9740 continue; 9741 fid = IGU_FID(val); 9742 if ((fid & IGU_FID_ENCODE_IS_PF)) { 9743 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) 9744 continue; 9745 if (IGU_VEC(val) == 0) { 9746 /* default status block */ 9747 bp->igu_dsb_id = igu_sb_id; 9748 } else { 9749 if (bp->igu_base_sb == 0xff) 9750 bp->igu_base_sb = igu_sb_id; 9751 igu_sb_cnt++; 9752 } 9753 } 9754 } 9755 9756 #ifdef CONFIG_PCI_MSI 9757 /* 9758 * It's expected that the number of CAM entries for this function is equal 9759 * to the number evaluated based on the MSI-X table size. We want a 9760 * harsh warning if these values are different!
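 * (Here bp->igu_sb_cnt holds the MSI-X-derived count this comment refers
 * to, while the local igu_sb_cnt was counted straight out of the IGU CAM
 * in the loop above.)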
9761 */ 9762 WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); 9763 #endif 9764 9765 if (igu_sb_cnt == 0) 9766 BNX2X_ERR("CAM configuration error\n"); 9767 } 9768 9769 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 9770 u32 switch_cfg) 9771 { 9772 int cfg_size = 0, idx, port = BP_PORT(bp); 9773 9774 /* Aggregation of supported attributes of all external phys */ 9775 bp->port.supported[0] = 0; 9776 bp->port.supported[1] = 0; 9777 switch (bp->link_params.num_phys) { 9778 case 1: 9779 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; 9780 cfg_size = 1; 9781 break; 9782 case 2: 9783 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; 9784 cfg_size = 1; 9785 break; 9786 case 3: 9787 if (bp->link_params.multi_phy_config & 9788 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 9789 bp->port.supported[1] = 9790 bp->link_params.phy[EXT_PHY1].supported; 9791 bp->port.supported[0] = 9792 bp->link_params.phy[EXT_PHY2].supported; 9793 } else { 9794 bp->port.supported[0] = 9795 bp->link_params.phy[EXT_PHY1].supported; 9796 bp->port.supported[1] = 9797 bp->link_params.phy[EXT_PHY2].supported; 9798 } 9799 cfg_size = 2; 9800 break; 9801 } 9802 9803 if (!(bp->port.supported[0] || bp->port.supported[1])) { 9804 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", 9805 SHMEM_RD(bp, 9806 dev_info.port_hw_config[port].external_phy_config), 9807 SHMEM_RD(bp, 9808 dev_info.port_hw_config[port].external_phy_config2)); 9809 return; 9810 } 9811 9812 if (CHIP_IS_E3(bp)) 9813 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 9814 else { 9815 switch (switch_cfg) { 9816 case SWITCH_CFG_1G: 9817 bp->port.phy_addr = REG_RD( 9818 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 9819 break; 9820 case SWITCH_CFG_10G: 9821 bp->port.phy_addr = REG_RD( 9822 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 9823 break; 9824 default: 9825 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 9826 bp->port.link_config[0]); 9827 return; 9828 } 9829 } 9830 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 9831 /* mask what we support according to speed_cap_mask per configuration */ 9832 for (idx = 0; idx < cfg_size; idx++) { 9833 if (!(bp->link_params.speed_cap_mask[idx] & 9834 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 9835 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 9836 9837 if (!(bp->link_params.speed_cap_mask[idx] & 9838 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 9839 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 9840 9841 if (!(bp->link_params.speed_cap_mask[idx] & 9842 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 9843 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 9844 9845 if (!(bp->link_params.speed_cap_mask[idx] & 9846 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 9847 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 9848 9849 if (!(bp->link_params.speed_cap_mask[idx] & 9850 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 9851 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 9852 SUPPORTED_1000baseT_Full); 9853 9854 if (!(bp->link_params.speed_cap_mask[idx] & 9855 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 9856 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 9857 9858 if (!(bp->link_params.speed_cap_mask[idx] & 9859 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 9860 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; 9861 9862 } 9863 9864 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 9865 bp->port.supported[1]); 9866 } 9867 9868 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) 9869 { 9870 u32 link_config, idx, 
cfg_size = 0; 9871 bp->port.advertising[0] = 0; 9872 bp->port.advertising[1] = 0; 9873 switch (bp->link_params.num_phys) { 9874 case 1: 9875 case 2: 9876 cfg_size = 1; 9877 break; 9878 case 3: 9879 cfg_size = 2; 9880 break; 9881 } 9882 for (idx = 0; idx < cfg_size; idx++) { 9883 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 9884 link_config = bp->port.link_config[idx]; 9885 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 9886 case PORT_FEATURE_LINK_SPEED_AUTO: 9887 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 9888 bp->link_params.req_line_speed[idx] = 9889 SPEED_AUTO_NEG; 9890 bp->port.advertising[idx] |= 9891 bp->port.supported[idx]; 9892 if (bp->link_params.phy[EXT_PHY1].type == 9893 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 9894 bp->port.advertising[idx] |= 9895 (SUPPORTED_100baseT_Half | 9896 SUPPORTED_100baseT_Full); 9897 } else { 9898 /* force 10G, no AN */ 9899 bp->link_params.req_line_speed[idx] = 9900 SPEED_10000; 9901 bp->port.advertising[idx] |= 9902 (ADVERTISED_10000baseT_Full | 9903 ADVERTISED_FIBRE); 9904 continue; 9905 } 9906 break; 9907 9908 case PORT_FEATURE_LINK_SPEED_10M_FULL: 9909 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 9910 bp->link_params.req_line_speed[idx] = 9911 SPEED_10; 9912 bp->port.advertising[idx] |= 9913 (ADVERTISED_10baseT_Full | 9914 ADVERTISED_TP); 9915 } else { 9916 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9917 link_config, 9918 bp->link_params.speed_cap_mask[idx]); 9919 return; 9920 } 9921 break; 9922 9923 case PORT_FEATURE_LINK_SPEED_10M_HALF: 9924 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 9925 bp->link_params.req_line_speed[idx] = 9926 SPEED_10; 9927 bp->link_params.req_duplex[idx] = 9928 DUPLEX_HALF; 9929 bp->port.advertising[idx] |= 9930 (ADVERTISED_10baseT_Half | 9931 ADVERTISED_TP); 9932 } else { 9933 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9934 link_config, 9935 bp->link_params.speed_cap_mask[idx]); 9936 return; 9937 } 9938 break; 9939 9940 case PORT_FEATURE_LINK_SPEED_100M_FULL: 9941 if (bp->port.supported[idx] & 9942 SUPPORTED_100baseT_Full) { 9943 bp->link_params.req_line_speed[idx] = 9944 SPEED_100; 9945 bp->port.advertising[idx] |= 9946 (ADVERTISED_100baseT_Full | 9947 ADVERTISED_TP); 9948 } else { 9949 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9950 link_config, 9951 bp->link_params.speed_cap_mask[idx]); 9952 return; 9953 } 9954 break; 9955 9956 case PORT_FEATURE_LINK_SPEED_100M_HALF: 9957 if (bp->port.supported[idx] & 9958 SUPPORTED_100baseT_Half) { 9959 bp->link_params.req_line_speed[idx] = 9960 SPEED_100; 9961 bp->link_params.req_duplex[idx] = 9962 DUPLEX_HALF; 9963 bp->port.advertising[idx] |= 9964 (ADVERTISED_100baseT_Half | 9965 ADVERTISED_TP); 9966 } else { 9967 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9968 link_config, 9969 bp->link_params.speed_cap_mask[idx]); 9970 return; 9971 } 9972 break; 9973 9974 case PORT_FEATURE_LINK_SPEED_1G: 9975 if (bp->port.supported[idx] & 9976 SUPPORTED_1000baseT_Full) { 9977 bp->link_params.req_line_speed[idx] = 9978 SPEED_1000; 9979 bp->port.advertising[idx] |= 9980 (ADVERTISED_1000baseT_Full | 9981 ADVERTISED_TP); 9982 } else { 9983 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9984 link_config, 9985 bp->link_params.speed_cap_mask[idx]); 9986 return; 9987 } 9988 break; 9989 9990 case PORT_FEATURE_LINK_SPEED_2_5G: 9991 if (bp->port.supported[idx] & 9992 SUPPORTED_2500baseX_Full) { 9993 bp->link_params.req_line_speed[idx] = 9994 SPEED_2500; 9995 bp->port.advertising[idx] |= 9996 (ADVERTISED_2500baseX_Full | 9997 ADVERTISED_TP); 9998 } else { 9999 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10000 link_config, 10001 bp->link_params.speed_cap_mask[idx]); 10002 return; 10003 } 10004 break; 10005 10006 case PORT_FEATURE_LINK_SPEED_10G_CX4: 10007 if (bp->port.supported[idx] & 10008 SUPPORTED_10000baseT_Full) { 10009 bp->link_params.req_line_speed[idx] = 10010 SPEED_10000; 10011 bp->port.advertising[idx] |= 10012 (ADVERTISED_10000baseT_Full | 10013 ADVERTISED_FIBRE); 10014 } else { 10015 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10016 link_config, 10017 bp->link_params.speed_cap_mask[idx]); 10018 return; 10019 } 10020 break; 10021 case PORT_FEATURE_LINK_SPEED_20G: 10022 bp->link_params.req_line_speed[idx] = SPEED_20000; 10023 10024 break; 10025 default: 10026 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n", 10027 link_config); 10028 bp->link_params.req_line_speed[idx] = 10029 SPEED_AUTO_NEG; 10030 bp->port.advertising[idx] = 10031 bp->port.supported[idx]; 10032 break; 10033 } 10034 10035 bp->link_params.req_flow_ctrl[idx] = (link_config & 10036 PORT_FEATURE_FLOW_CONTROL_MASK); 10037 if ((bp->link_params.req_flow_ctrl[idx] == 10038 BNX2X_FLOW_CTRL_AUTO) && 10039 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { 10040 bp->link_params.req_flow_ctrl[idx] = 10041 BNX2X_FLOW_CTRL_NONE; 10042 } 10043 10044 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 10045 bp->link_params.req_line_speed[idx], 10046 bp->link_params.req_duplex[idx], 10047 bp->link_params.req_flow_ctrl[idx], 10048 bp->port.advertising[idx]); 10049 } 10050 } 10051 10052 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 10053 { 10054 mac_hi = cpu_to_be16(mac_hi); 10055 mac_lo = cpu_to_be32(mac_lo); 10056 memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); 10057 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); 10058 } 10059 10060 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 10061 { 10062 int port = BP_PORT(bp); 10063 u32 config; 10064 u32 ext_phy_type, ext_phy_config; 10065 10066 bp->link_params.bp = bp; 10067 bp->link_params.port = port; 10068 10069 bp->link_params.lane_config = 10070 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 10071 10072 bp->link_params.speed_cap_mask[0] = 10073 SHMEM_RD(bp, 10074 dev_info.port_hw_config[port].speed_capability_mask); 10075 bp->link_params.speed_cap_mask[1] = 10076 SHMEM_RD(bp, 10077 dev_info.port_hw_config[port].speed_capability_mask2); 10078 bp->port.link_config[0] = 10079 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 10080 10081 bp->port.link_config[1] = 10082 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); 10083 10084 bp->link_params.multi_phy_config = 10085 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 10086 /* If the device is capable of WoL, set the default state according 10087 * to the HW 10088 */ 10089 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 10090 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 10091 (config & PORT_FEATURE_WOL_ENABLED)); 10092 10093 
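/* A minimal sketch (illustrative only, not driver code) of how the two
 * configuration sets read above are consumed: index 0 always describes
 * the first PHY, and index 1 is used only when a second external PHY is
 * populated (bp->link_params.num_phys == 3), e.g.:
 *
 *	int cfgs = (bp->link_params.num_phys == 3) ? 2 : 1;
 *	for (idx = 0; idx < cfgs; idx++)
 *		mask = bp->link_params.speed_cap_mask[idx];
 *
 * PORT_HW_CFG_PHY_SWAPPED_ENABLED in multi_phy_config additionally swaps
 * which physical PHY each index refers to - see
 * bnx2x_link_settings_supported() above.
 */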
BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 10094 bp->link_params.lane_config, 10095 bp->link_params.speed_cap_mask[0], 10096 bp->port.link_config[0]); 10097 10098 bp->link_params.switch_cfg = (bp->port.link_config[0] & 10099 PORT_FEATURE_CONNECTED_SWITCH_MASK); 10100 bnx2x_phy_probe(&bp->link_params); 10101 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 10102 10103 bnx2x_link_settings_requested(bp); 10104 10105 /* 10106 * If connected directly, work with the internal PHY, otherwise, work 10107 * with the external PHY 10108 */ 10109 ext_phy_config = 10110 SHMEM_RD(bp, 10111 dev_info.port_hw_config[port].external_phy_config); 10112 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 10113 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 10114 bp->mdio.prtad = bp->port.phy_addr; 10115 10116 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 10117 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 10118 bp->mdio.prtad = 10119 XGXS_EXT_PHY_ADDR(ext_phy_config); 10120 10121 /* 10122 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) 10123 * In MF mode, it is set to cover self test cases 10124 */ 10125 if (IS_MF(bp)) 10126 bp->port.need_hw_lock = 1; 10127 else 10128 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 10129 bp->common.shmem_base, 10130 bp->common.shmem2_base); 10131 } 10132 10133 void bnx2x_get_iscsi_info(struct bnx2x *bp) 10134 { 10135 u32 no_flags = NO_ISCSI_FLAG; 10136 #ifdef BCM_CNIC 10137 int port = BP_PORT(bp); 10138 10139 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10140 drv_lic_key[port].max_iscsi_conn); 10141 10142 /* Get the number of maximum allowed iSCSI connections */ 10143 bp->cnic_eth_dev.max_iscsi_conn = 10144 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 10145 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; 10146 10147 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", 10148 bp->cnic_eth_dev.max_iscsi_conn); 10149 10150 /* 10151 * If maximum allowed number of connections is zero - 10152 * disable the feature. 10153 */ 10154 if (!bp->cnic_eth_dev.max_iscsi_conn) 10155 bp->flags |= no_flags; 10156 #else 10157 bp->flags |= no_flags; 10158 #endif 10159 } 10160 10161 #ifdef BCM_CNIC 10162 static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 10163 { 10164 /* Port info */ 10165 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10166 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); 10167 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10168 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); 10169 10170 /* Node info */ 10171 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10172 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); 10173 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10174 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 10175 } 10176 #endif 10177 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) 10178 { 10179 #ifdef BCM_CNIC 10180 int port = BP_PORT(bp); 10181 int func = BP_ABS_FUNC(bp); 10182 10183 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10184 drv_lic_key[port].max_fcoe_conn); 10185 10186 /* Get the number of maximum allowed FCoE connections */ 10187 bp->cnic_eth_dev.max_fcoe_conn = 10188 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 10189 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 10190 10191 /* Read the WWN: */ 10192 if (!IS_MF(bp)) { 10193 /* Port info */ 10194 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10195 SHMEM_RD(bp, 10196 dev_info.port_hw_config[port]. 
10197 fcoe_wwn_port_name_upper); 10198 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10199 SHMEM_RD(bp, 10200 dev_info.port_hw_config[port]. 10201 fcoe_wwn_port_name_lower); 10202 10203 /* Node info */ 10204 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10205 SHMEM_RD(bp, 10206 dev_info.port_hw_config[port]. 10207 fcoe_wwn_node_name_upper); 10208 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10209 SHMEM_RD(bp, 10210 dev_info.port_hw_config[port]. 10211 fcoe_wwn_node_name_lower); 10212 } else if (!IS_MF_SD(bp)) { 10213 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 10214 10215 /* 10216 * Read the WWN info only if the FCoE feature is enabled for 10217 * this function. 10218 */ 10219 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) 10220 bnx2x_get_ext_wwn_info(bp, func); 10221 10222 } else if (IS_MF_FCOE_SD(bp)) 10223 bnx2x_get_ext_wwn_info(bp, func); 10224 10225 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); 10226 10227 /* 10228 * If the maximum allowed number of connections is zero, 10229 * disable the feature. 10230 */ 10231 if (!bp->cnic_eth_dev.max_fcoe_conn) 10232 bp->flags |= NO_FCOE_FLAG; 10233 #else 10234 bp->flags |= NO_FCOE_FLAG; 10235 #endif 10236 } 10237 10238 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) 10239 { 10240 /* 10241 * iSCSI may be dynamically disabled, but by reading the 10242 * info here we can decrease the driver's memory usage 10243 * if the feature is disabled for good 10244 */ 10245 bnx2x_get_iscsi_info(bp); 10246 bnx2x_get_fcoe_info(bp); 10247 } 10248 10249 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 10250 { 10251 u32 val, val2; 10252 int func = BP_ABS_FUNC(bp); 10253 int port = BP_PORT(bp); 10254 #ifdef BCM_CNIC 10255 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; 10256 u8 *fip_mac = bp->fip_mac; 10257 #endif 10258 10259 /* Zero primary MAC configuration */ 10260 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10261 10262 if (BP_NOMCP(bp)) { 10263 BNX2X_ERROR("warning: random MAC workaround active\n"); 10264 eth_hw_addr_random(bp->dev); 10265 } else if (IS_MF(bp)) { 10266 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 10267 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); 10268 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 10269 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) 10270 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 10271 10272 #ifdef BCM_CNIC 10273 /* 10274 * iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE 10275 * MAC is missing, the corresponding feature should be disabled. 10276 * 10277 * In non-SD mode the feature configuration comes from 10278 * struct func_ext_config. 10279 */ 10280 if (!IS_MF_SD(bp)) { 10281 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 10282 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 10283 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10284 iscsi_mac_addr_upper); 10285 val = MF_CFG_RD(bp, func_ext_config[func]. 10286 iscsi_mac_addr_lower); 10287 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10288 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10289 iscsi_mac); 10290 } else { 10291 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 10292 } 10293 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 10294 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10295 fcoe_mac_addr_upper); 10296 val = MF_CFG_RD(bp, func_ext_config[func].
10297 fcoe_mac_addr_lower); 10298 bnx2x_set_mac_buf(fip_mac, val, val2); 10299 BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", 10300 fip_mac); 10301 10302 } else 10303 bp->flags |= NO_FCOE_FLAG; 10304 10305 bp->mf_ext_config = cfg; 10306 10307 } else { /* SD MODE */ 10308 if (IS_MF_STORAGE_SD(bp)) { 10309 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10310 /* use primary mac as iscsi mac */ 10311 memcpy(iscsi_mac, bp->dev->dev_addr, 10312 ETH_ALEN); 10313 10314 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 10315 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10316 iscsi_mac); 10317 } else { /* FCoE */ 10318 memcpy(fip_mac, bp->dev->dev_addr, 10319 ETH_ALEN); 10320 BNX2X_DEV_INFO("SD FCoE MODE\n"); 10321 BNX2X_DEV_INFO("Read FIP MAC: %pM\n", 10322 fip_mac); 10323 } 10324 /* Zero primary MAC configuration */ 10325 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10326 } 10327 } 10328 10329 if (IS_MF_FCOE_AFEX(bp)) 10330 /* use FIP MAC as primary MAC */ 10331 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 10332 10333 #endif 10334 } else { 10335 /* in SF read MACs from port configuration */ 10336 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 10337 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 10338 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 10339 10340 #ifdef BCM_CNIC 10341 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10342 iscsi_mac_upper); 10343 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10344 iscsi_mac_lower); 10345 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10346 10347 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10348 fcoe_fip_mac_upper); 10349 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10350 fcoe_fip_mac_lower); 10351 bnx2x_set_mac_buf(fip_mac, val, val2); 10352 #endif 10353 } 10354 10355 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 10356 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 10357 10358 #ifdef BCM_CNIC 10359 /* Disable iSCSI if MAC configuration is 10360 * invalid. 10361 */ 10362 if (!is_valid_ether_addr(iscsi_mac)) { 10363 bp->flags |= NO_ISCSI_FLAG; 10364 memset(iscsi_mac, 0, ETH_ALEN); 10365 } 10366 10367 /* Disable FCoE if MAC configuration is 10368 * invalid. 
10369 */ 10370 if (!is_valid_ether_addr(fip_mac)) { 10371 bp->flags |= NO_FCOE_FLAG; 10372 memset(bp->fip_mac, 0, ETH_ALEN); 10373 } 10374 #endif 10375 10376 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) 10377 dev_err(&bp->pdev->dev, 10378 "bad Ethernet MAC address configuration: %pM\n" 10379 "change it manually before bringing up the appropriate network interface\n", 10380 bp->dev->dev_addr); 10381 10382 10383 } 10384 10385 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 10386 { 10387 int /*abs*/func = BP_ABS_FUNC(bp); 10388 int vn; 10389 u32 val = 0; 10390 int rc = 0; 10391 10392 bnx2x_get_common_hwinfo(bp); 10393 10394 /* 10395 * initialize IGU parameters 10396 */ 10397 if (CHIP_IS_E1x(bp)) { 10398 bp->common.int_block = INT_BLOCK_HC; 10399 10400 bp->igu_dsb_id = DEF_SB_IGU_ID; 10401 bp->igu_base_sb = 0; 10402 } else { 10403 bp->common.int_block = INT_BLOCK_IGU; 10404 10405 /* do not allow device reset during IGU info processing */ 10406 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 10407 10408 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 10409 10410 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 10411 int tout = 5000; 10412 10413 BNX2X_DEV_INFO("FORCING Normal Mode\n"); 10414 10415 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 10416 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); 10417 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); 10418 10419 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 10420 tout--; 10421 usleep_range(1000, 1000); 10422 } 10423 10424 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 10425 dev_err(&bp->pdev->dev, 10426 "FORCING Normal Mode failed!!!\n"); 10427 return -EPERM; 10428 } 10429 } 10430 10431 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 10432 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); 10433 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; 10434 } else { 10435 BNX2X_DEV_INFO("IGU Normal Mode\n"); 10436 } 10437 bnx2x_get_igu_cam_info(bp); 10438 10439 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 10440 } 10441 10442 /* 10443 * set base FW non-default (fast path) status block id; this value is 10444 * used to initialize the fw_sb_id saved on the fp/queue structure to 10445 * determine the id used by the FW. 10446 */ 10447 if (CHIP_IS_E1x(bp)) 10448 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); 10449 else /* 10450 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of 10451 * the same queue are indicated on the same IGU SB). So we prefer 10452 * FW and IGU SBs to be the same value. 10453 */ 10454 bp->base_fw_ndsb = bp->igu_base_sb; 10455 10456 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" 10457 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, 10458 bp->igu_sb_cnt, bp->base_fw_ndsb); 10459 10460 /* 10461 * Initialize MF configuration 10462 */ 10463 10464 bp->mf_ov = 0; 10465 bp->mf_mode = 0; 10466 vn = BP_VN(bp); 10467 10468 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 10469 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 10470 bp->common.shmem2_base, SHMEM2_RD(bp, size), 10471 (u32)offsetof(struct shmem2_region, mf_cfg_addr)); 10472 10473 if (SHMEM2_HAS(bp, mf_cfg_addr)) 10474 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 10475 else 10476 bp->common.mf_cfg_base = bp->common.shmem_base + 10477 offsetof(struct shmem_region, func_mb) + 10478 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 10479 /* 10480 * get mf configuration: 10481 * 1. existence of MF configuration 10482 * 2.
MAC address must be legal (check only upper bytes) 10483 * for Switch-Independent mode; 10484 * OVLAN must be legal for Switch-Dependent mode 10485 * 3. SF_MODE configures specific MF mode 10486 */ 10487 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 10488 /* get mf configuration */ 10489 val = SHMEM_RD(bp, 10490 dev_info.shared_feature_config.config); 10491 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; 10492 10493 switch (val) { 10494 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 10495 val = MF_CFG_RD(bp, func_mf_config[func]. 10496 mac_upper); 10497 /* check for legal mac (upper bytes)*/ 10498 if (val != 0xffff) { 10499 bp->mf_mode = MULTI_FUNCTION_SI; 10500 bp->mf_config[vn] = MF_CFG_RD(bp, 10501 func_mf_config[func].config); 10502 } else 10503 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 10504 break; 10505 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 10506 if ((!CHIP_IS_E1x(bp)) && 10507 (MF_CFG_RD(bp, func_mf_config[func]. 10508 mac_upper) != 0xffff) && 10509 (SHMEM2_HAS(bp, 10510 afex_driver_support))) { 10511 bp->mf_mode = MULTI_FUNCTION_AFEX; 10512 bp->mf_config[vn] = MF_CFG_RD(bp, 10513 func_mf_config[func].config); 10514 } else { 10515 BNX2X_DEV_INFO("can not configure afex mode\n"); 10516 } 10517 break; 10518 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 10519 /* get OV configuration */ 10520 val = MF_CFG_RD(bp, 10521 func_mf_config[FUNC_0].e1hov_tag); 10522 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 10523 10524 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 10525 bp->mf_mode = MULTI_FUNCTION_SD; 10526 bp->mf_config[vn] = MF_CFG_RD(bp, 10527 func_mf_config[func].config); 10528 } else 10529 BNX2X_DEV_INFO("illegal OV for SD\n"); 10530 break; 10531 default: 10532 /* Unknown configuration: reset mf_config */ 10533 bp->mf_config[vn] = 0; 10534 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); 10535 } 10536 } 10537 10538 BNX2X_DEV_INFO("%s function mode\n", 10539 IS_MF(bp) ? 
"multi" : "single"); 10540 10541 switch (bp->mf_mode) { 10542 case MULTI_FUNCTION_SD: 10543 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 10544 FUNC_MF_CFG_E1HOV_TAG_MASK; 10545 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 10546 bp->mf_ov = val; 10547 bp->path_has_ovlan = true; 10548 10549 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", 10550 func, bp->mf_ov, bp->mf_ov); 10551 } else { 10552 dev_err(&bp->pdev->dev, 10553 "No valid MF OV for func %d, aborting\n", 10554 func); 10555 return -EPERM; 10556 } 10557 break; 10558 case MULTI_FUNCTION_AFEX: 10559 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); 10560 break; 10561 case MULTI_FUNCTION_SI: 10562 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 10563 func); 10564 break; 10565 default: 10566 if (vn) { 10567 dev_err(&bp->pdev->dev, 10568 "VN %d is in a single function mode, aborting\n", 10569 vn); 10570 return -EPERM; 10571 } 10572 break; 10573 } 10574 10575 /* check if other port on the path needs ovlan: 10576 * Since MF configuration is shared between ports 10577 * Possible mixed modes are only 10578 * {SF, SI} {SF, SD} {SD, SF} {SI, SF} 10579 */ 10580 if (CHIP_MODE_IS_4_PORT(bp) && 10581 !bp->path_has_ovlan && 10582 !IS_MF(bp) && 10583 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 10584 u8 other_port = !BP_PORT(bp); 10585 u8 other_func = BP_PATH(bp) + 2*other_port; 10586 val = MF_CFG_RD(bp, 10587 func_mf_config[other_func].e1hov_tag); 10588 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 10589 bp->path_has_ovlan = true; 10590 } 10591 } 10592 10593 /* adjust igu_sb_cnt to MF for E1x */ 10594 if (CHIP_IS_E1x(bp) && IS_MF(bp)) 10595 bp->igu_sb_cnt /= E1HVN_MAX; 10596 10597 /* port info */ 10598 bnx2x_get_port_hwinfo(bp); 10599 10600 /* Get MAC addresses */ 10601 bnx2x_get_mac_hwinfo(bp); 10602 10603 bnx2x_get_cnic_info(bp); 10604 10605 return rc; 10606 } 10607 10608 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) 10609 { 10610 int cnt, i, block_end, rodi; 10611 char vpd_start[BNX2X_VPD_LEN+1]; 10612 char str_id_reg[VENDOR_ID_LEN+1]; 10613 char str_id_cap[VENDOR_ID_LEN+1]; 10614 char *vpd_data; 10615 char *vpd_extended_data = NULL; 10616 u8 len; 10617 10618 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); 10619 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); 10620 10621 if (cnt < BNX2X_VPD_LEN) 10622 goto out_not_found; 10623 10624 /* VPD RO tag should be first tag after identifier string, hence 10625 * we should be able to find it in first BNX2X_VPD_LEN chars 10626 */ 10627 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, 10628 PCI_VPD_LRDT_RO_DATA); 10629 if (i < 0) 10630 goto out_not_found; 10631 10632 block_end = i + PCI_VPD_LRDT_TAG_SIZE + 10633 pci_vpd_lrdt_size(&vpd_start[i]); 10634 10635 i += PCI_VPD_LRDT_TAG_SIZE; 10636 10637 if (block_end > BNX2X_VPD_LEN) { 10638 vpd_extended_data = kmalloc(block_end, GFP_KERNEL); 10639 if (vpd_extended_data == NULL) 10640 goto out_not_found; 10641 10642 /* read rest of vpd image into vpd_extended_data */ 10643 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); 10644 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, 10645 block_end - BNX2X_VPD_LEN, 10646 vpd_extended_data + BNX2X_VPD_LEN); 10647 if (cnt < (block_end - BNX2X_VPD_LEN)) 10648 goto out_not_found; 10649 vpd_data = vpd_extended_data; 10650 } else 10651 vpd_data = vpd_start; 10652 10653 /* now vpd_data holds full vpd content in both cases */ 10654 10655 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 10656 PCI_VPD_RO_KEYWORD_MFR_ID); 10657 if (rodi < 0) 10658 goto out_not_found; 10659 
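/* Layout reminder (not driver code): each VPD read-only info field found
 * above is { keyword[2], length[1], data[length] }, so
 *
 *	len  = vpd_data[rodi + 2];   - what pci_vpd_info_field_size() returns
 *	data = &vpd_data[rodi + PCI_VPD_INFO_FLD_HDR_SIZE];
 *
 * The size check and the PCI_VPD_INFO_FLD_HDR_SIZE advance below rely on
 * exactly this layout.
 */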
10660 len = pci_vpd_info_field_size(&vpd_data[rodi]); 10661 10662 if (len != VENDOR_ID_LEN) 10663 goto out_not_found; 10664 10665 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 10666 10667 /* vendor specific info */ 10668 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); 10669 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); 10670 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || 10671 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { 10672 10673 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 10674 PCI_VPD_RO_KEYWORD_VENDOR0); 10675 if (rodi >= 0) { 10676 len = pci_vpd_info_field_size(&vpd_data[rodi]); 10677 10678 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 10679 10680 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { 10681 memcpy(bp->fw_ver, &vpd_data[rodi], len); 10682 bp->fw_ver[len] = ' '; 10683 } 10684 } 10685 kfree(vpd_extended_data); 10686 return; 10687 } 10688 out_not_found: 10689 kfree(vpd_extended_data); 10690 return; 10691 } 10692 10693 static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) 10694 { 10695 u32 flags = 0; 10696 10697 if (CHIP_REV_IS_FPGA(bp)) 10698 SET_FLAGS(flags, MODE_FPGA); 10699 else if (CHIP_REV_IS_EMUL(bp)) 10700 SET_FLAGS(flags, MODE_EMUL); 10701 else 10702 SET_FLAGS(flags, MODE_ASIC); 10703 10704 if (CHIP_MODE_IS_4_PORT(bp)) 10705 SET_FLAGS(flags, MODE_PORT4); 10706 else 10707 SET_FLAGS(flags, MODE_PORT2); 10708 10709 if (CHIP_IS_E2(bp)) 10710 SET_FLAGS(flags, MODE_E2); 10711 else if (CHIP_IS_E3(bp)) { 10712 SET_FLAGS(flags, MODE_E3); 10713 if (CHIP_REV(bp) == CHIP_REV_Ax) 10714 SET_FLAGS(flags, MODE_E3_A0); 10715 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 10716 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 10717 } 10718 10719 if (IS_MF(bp)) { 10720 SET_FLAGS(flags, MODE_MF); 10721 switch (bp->mf_mode) { 10722 case MULTI_FUNCTION_SD: 10723 SET_FLAGS(flags, MODE_MF_SD); 10724 break; 10725 case MULTI_FUNCTION_SI: 10726 SET_FLAGS(flags, MODE_MF_SI); 10727 break; 10728 case MULTI_FUNCTION_AFEX: 10729 SET_FLAGS(flags, MODE_MF_AFEX); 10730 break; 10731 } 10732 } else 10733 SET_FLAGS(flags, MODE_SF); 10734 10735 #if defined(__LITTLE_ENDIAN) 10736 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 10737 #else /*(__BIG_ENDIAN)*/ 10738 SET_FLAGS(flags, MODE_BIG_ENDIAN); 10739 #endif 10740 INIT_MODE_FLAGS(bp) = flags; 10741 } 10742 10743 static int __devinit bnx2x_init_bp(struct bnx2x *bp) 10744 { 10745 int func; 10746 int rc; 10747 10748 mutex_init(&bp->port.phy_mutex); 10749 mutex_init(&bp->fw_mb_mutex); 10750 spin_lock_init(&bp->stats_lock); 10751 #ifdef BCM_CNIC 10752 mutex_init(&bp->cnic_mutex); 10753 #endif 10754 10755 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 10756 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 10757 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 10758 rc = bnx2x_get_hwinfo(bp); 10759 if (rc) 10760 return rc; 10761 10762 bnx2x_set_modes_bitmap(bp); 10763 10764 rc = bnx2x_alloc_mem_bp(bp); 10765 if (rc) 10766 return rc; 10767 10768 bnx2x_read_fwinfo(bp); 10769 10770 func = BP_FUNC(bp); 10771 10772 /* need to reset chip if undi was active */ 10773 if (!BP_NOMCP(bp)) { 10774 /* init fw_seq */ 10775 bp->fw_seq = 10776 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 10777 DRV_MSG_SEQ_NUMBER_MASK; 10778 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 10779 10780 bnx2x_prev_unload(bp); 10781 } 10782 10783 10784 if (CHIP_REV_IS_FPGA(bp)) 10785 dev_err(&bp->pdev->dev, "FPGA detected\n"); 10786 10787 if (BP_NOMCP(bp) && (func == 0)) 10788 dev_err(&bp->pdev->dev, "MCP disabled, must load 
devices in order!\n"); 10789 10790 bp->disable_tpa = disable_tpa; 10791 10792 #ifdef BCM_CNIC 10793 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 10794 #endif 10795 10796 /* Set TPA flags */ 10797 if (bp->disable_tpa) { 10798 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 10799 bp->dev->features &= ~NETIF_F_LRO; 10800 } else { 10801 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 10802 bp->dev->features |= NETIF_F_LRO; 10803 } 10804 10805 if (CHIP_IS_E1(bp)) 10806 bp->dropless_fc = 0; 10807 else 10808 bp->dropless_fc = dropless_fc; 10809 10810 bp->mrrs = mrrs; 10811 10812 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; 10813 10814 /* make sure that the numbers are in the right granularity */ 10815 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 10816 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 10817 10818 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; 10819 10820 init_timer(&bp->timer); 10821 bp->timer.expires = jiffies + bp->current_interval; 10822 bp->timer.data = (unsigned long) bp; 10823 bp->timer.function = bnx2x_timer; 10824 10825 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); 10826 bnx2x_dcbx_init_params(bp); 10827 10828 #ifdef BCM_CNIC 10829 if (CHIP_IS_E1x(bp)) 10830 bp->cnic_base_cl_id = FP_SB_MAX_E1x; 10831 else 10832 bp->cnic_base_cl_id = FP_SB_MAX_E2; 10833 #endif 10834 10835 /* multiple tx priority */ 10836 if (CHIP_IS_E1x(bp)) 10837 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; 10838 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) 10839 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; 10840 if (CHIP_IS_E3B0(bp)) 10841 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 10842 10843 return rc; 10844 } 10845 10846 10847 /**************************************************************************** 10848 * General service functions 10849 ****************************************************************************/ 10850 10851 /* 10852 * net_device service functions 10853 */ 10854 10855 /* called with rtnl_lock */ 10856 static int bnx2x_open(struct net_device *dev) 10857 { 10858 struct bnx2x *bp = netdev_priv(dev); 10859 bool global = false; 10860 int other_engine = BP_PATH(bp) ? 0 : 1; 10861 bool other_load_status, load_status; 10862 10863 bp->stats_init = true; 10864 10865 netif_carrier_off(dev); 10866 10867 bnx2x_set_power_state(bp, PCI_D0); 10868 10869 other_load_status = bnx2x_get_load_status(bp, other_engine); 10870 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 10871 10872 /* 10873 * If a parity error happened during the unload, then attentions 10874 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we 10875 * want the first function loaded on the current engine to 10876 * complete the recovery. 10877 */ 10878 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 10879 bnx2x_chk_parity_attn(bp, &global, true)) 10880 do { 10881 /* 10882 * If there are attentions and they are in global 10883 * blocks, set the GLOBAL_RESET bit regardless of whether 10884 * it will be this function that will complete the 10885 * recovery or not. 10886 */ 10887 if (global) 10888 bnx2x_set_reset_global(bp); 10889 10890 /* 10891 * Only the first function on the current engine should 10892 * try to recover in open. In case of attentions in 10893 * global blocks only the first in the chip should try 10894 * to recover. 10895 */ 10896 if ((!load_status && 10897 (!global || !other_load_status)) && 10898 bnx2x_trylock_leader_lock(bp) && 10899 !bnx2x_leader_reset(bp)) { 10900 netdev_info(bp->dev, "Recovered in open\n"); 10901 break; 10902 } 10903 10904 /* recovery has failed...
*/ 10905 bnx2x_set_power_state(bp, PCI_D3hot); 10906 bp->recovery_state = BNX2X_RECOVERY_FAILED; 10907 10908 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" 10909 "If you still see this message after a few retries then power cycle is required.\n"); 10910 10911 return -EAGAIN; 10912 } while (0); 10913 10914 bp->recovery_state = BNX2X_RECOVERY_DONE; 10915 return bnx2x_nic_load(bp, LOAD_OPEN); 10916 } 10917 10918 /* called with rtnl_lock */ 10919 static int bnx2x_close(struct net_device *dev) 10920 { 10921 struct bnx2x *bp = netdev_priv(dev); 10922 10923 /* Unload the driver, release IRQs */ 10924 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 10925 10926 /* Power off */ 10927 bnx2x_set_power_state(bp, PCI_D3hot); 10928 10929 return 0; 10930 } 10931 10932 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 10933 struct bnx2x_mcast_ramrod_params *p) 10934 { 10935 int mc_count = netdev_mc_count(bp->dev); 10936 struct bnx2x_mcast_list_elem *mc_mac = 10937 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); 10938 struct netdev_hw_addr *ha; 10939 10940 if (!mc_mac) 10941 return -ENOMEM; 10942 10943 INIT_LIST_HEAD(&p->mcast_list); 10944 10945 netdev_for_each_mc_addr(ha, bp->dev) { 10946 mc_mac->mac = bnx2x_mc_addr(ha); 10947 list_add_tail(&mc_mac->link, &p->mcast_list); 10948 mc_mac++; 10949 } 10950 10951 p->mcast_list_len = mc_count; 10952 10953 return 0; 10954 } 10955 10956 static void bnx2x_free_mcast_macs_list( 10957 struct bnx2x_mcast_ramrod_params *p) 10958 { 10959 struct bnx2x_mcast_list_elem *mc_mac = 10960 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, 10961 link); 10962 10963 WARN_ON(!mc_mac); 10964 /* The elements were allocated as a single array in bnx2x_init_mcast_macs_list(), so freeing the first entry releases the whole list */ kfree(mc_mac); 10965 } 10966 10967 /** 10968 * bnx2x_set_uc_list - configure a new list of unicast MACs. 10969 * 10970 * @bp: driver handle 10971 * 10972 * We will use zero (0) as a MAC type for these MACs.
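 *
 * A sketch of the resulting flow (illustrative): each address supplied by
 * netdev_for_each_uc_addr() is queued via bnx2x_set_mac_one() with the add
 * flag set, and a final bnx2x_set_mac_one() call with RAMROD_CONT set
 * flushes the whole pending list to the device in one go.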
10973 */ 10974 static int bnx2x_set_uc_list(struct bnx2x *bp) 10975 { 10976 int rc; 10977 struct net_device *dev = bp->dev; 10978 struct netdev_hw_addr *ha; 10979 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; 10980 unsigned long ramrod_flags = 0; 10981 10982 /* First schedule a clean-up of the old configuration */ 10983 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); 10984 if (rc < 0) { 10985 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); 10986 return rc; 10987 } 10988 10989 netdev_for_each_uc_addr(ha, dev) { 10990 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, 10991 BNX2X_UC_LIST_MAC, &ramrod_flags); 10992 if (rc < 0) { 10993 BNX2X_ERR("Failed to schedule ADD operations: %d\n", 10994 rc); 10995 return rc; 10996 } 10997 } 10998 10999 /* Execute the pending commands */ 11000 __set_bit(RAMROD_CONT, &ramrod_flags); 11001 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, 11002 BNX2X_UC_LIST_MAC, &ramrod_flags); 11003 } 11004 11005 static int bnx2x_set_mc_list(struct bnx2x *bp) 11006 { 11007 struct net_device *dev = bp->dev; 11008 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 11009 int rc = 0; 11010 11011 rparam.mcast_obj = &bp->mcast_obj; 11012 11013 /* first, clear all configured multicast MACs */ 11014 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 11015 if (rc < 0) { 11016 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); 11017 return rc; 11018 } 11019 11020 /* then, configure the new MAC list */ 11021 if (netdev_mc_count(dev)) { 11022 rc = bnx2x_init_mcast_macs_list(bp, &rparam); 11023 if (rc) { 11024 BNX2X_ERR("Failed to create multicast MAC list: %d\n", 11025 rc); 11026 return rc; 11027 } 11028 11029 /* Now add the new MACs */ 11030 rc = bnx2x_config_mcast(bp, &rparam, 11031 BNX2X_MCAST_CMD_ADD); 11032 if (rc < 0) 11033 BNX2X_ERR("Failed to set a new multicast configuration: %d\n", 11034 rc); 11035 11036 bnx2x_free_mcast_macs_list(&rparam); 11037 } 11038 11039 return rc; 11040 } 11041 11042 11043 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ 11044 void bnx2x_set_rx_mode(struct net_device *dev) 11045 { 11046 struct bnx2x *bp = netdev_priv(dev); 11047 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 11048 11049 if (bp->state != BNX2X_STATE_OPEN) { 11050 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 11051 return; 11052 } 11053 11054 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); 11055 11056 if (dev->flags & IFF_PROMISC) 11057 rx_mode = BNX2X_RX_MODE_PROMISC; 11058 else if ((dev->flags & IFF_ALLMULTI) || 11059 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && 11060 CHIP_IS_E1(bp))) 11061 rx_mode = BNX2X_RX_MODE_ALLMULTI; 11062 else { 11063 /* some multicasts */ 11064 if (bnx2x_set_mc_list(bp) < 0) 11065 rx_mode = BNX2X_RX_MODE_ALLMULTI; 11066 11067 if (bnx2x_set_uc_list(bp) < 0) 11068 rx_mode = BNX2X_RX_MODE_PROMISC; 11069 } 11070 11071 bp->rx_mode = rx_mode; 11072 #ifdef BCM_CNIC 11073 /* handle ISCSI SD mode */ 11074 if (IS_MF_ISCSI_SD(bp)) 11075 bp->rx_mode = BNX2X_RX_MODE_NONE; 11076 #endif 11077 11078 /* Schedule the rx_mode command */ 11079 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 11080 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 11081 return; 11082 } 11083 11084 bnx2x_set_storm_rx_mode(bp); 11085 } 11086 11087 /* called with rtnl_lock */ 11088 static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 11089 int devad, u16 addr) 11090 { 11091 struct bnx2x *bp = netdev_priv(netdev); 11092 u16 value; 11093 int rc; 11094
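/* Quick sketch of the clause-45 addressing used here (illustrative):
 * 'prtad' selects the PHY port address on the MDIO bus, 'devad' selects
 * the MMD device within that PHY (PMA/PMD, PCS, AN, ...), and 'addr' is
 * the register within that device. For clause-22 accesses the mdio layer
 * passes MDIO_DEVAD_NONE, which is remapped to DEFAULT_PHY_DEV_ADDR below.
 */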
11095 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 11096 prtad, devad, addr); 11097 11098 /* The HW expects different devad if CL22 is used */ 11099 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 11100 11101 bnx2x_acquire_phy_lock(bp); 11102 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); 11103 bnx2x_release_phy_lock(bp); 11104 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 11105 11106 if (!rc) 11107 rc = value; 11108 return rc; 11109 } 11110 11111 /* called with rtnl_lock */ 11112 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, 11113 u16 addr, u16 value) 11114 { 11115 struct bnx2x *bp = netdev_priv(netdev); 11116 int rc; 11117 11118 DP(NETIF_MSG_LINK, 11119 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", 11120 prtad, devad, addr, value); 11121 11122 /* The HW expects different devad if CL22 is used */ 11123 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 11124 11125 bnx2x_acquire_phy_lock(bp); 11126 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); 11127 bnx2x_release_phy_lock(bp); 11128 return rc; 11129 } 11130 11131 /* called with rtnl_lock */ 11132 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 11133 { 11134 struct bnx2x *bp = netdev_priv(dev); 11135 struct mii_ioctl_data *mdio = if_mii(ifr); 11136 11137 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", 11138 mdio->phy_id, mdio->reg_num, mdio->val_in); 11139 11140 if (!netif_running(dev)) 11141 return -EAGAIN; 11142 11143 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 11144 } 11145 11146 #ifdef CONFIG_NET_POLL_CONTROLLER 11147 static void poll_bnx2x(struct net_device *dev) 11148 { 11149 struct bnx2x *bp = netdev_priv(dev); 11150 11151 disable_irq(bp->pdev->irq); 11152 bnx2x_interrupt(bp->pdev->irq, dev); 11153 enable_irq(bp->pdev->irq); 11154 } 11155 #endif 11156 11157 static int bnx2x_validate_addr(struct net_device *dev) 11158 { 11159 struct bnx2x *bp = netdev_priv(dev); 11160 11161 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { 11162 BNX2X_ERR("Non-valid Ethernet address\n"); 11163 return -EADDRNOTAVAIL; 11164 } 11165 return 0; 11166 } 11167 11168 static const struct net_device_ops bnx2x_netdev_ops = { 11169 .ndo_open = bnx2x_open, 11170 .ndo_stop = bnx2x_close, 11171 .ndo_start_xmit = bnx2x_start_xmit, 11172 .ndo_select_queue = bnx2x_select_queue, 11173 .ndo_set_rx_mode = bnx2x_set_rx_mode, 11174 .ndo_set_mac_address = bnx2x_change_mac_addr, 11175 .ndo_validate_addr = bnx2x_validate_addr, 11176 .ndo_do_ioctl = bnx2x_ioctl, 11177 .ndo_change_mtu = bnx2x_change_mtu, 11178 .ndo_fix_features = bnx2x_fix_features, 11179 .ndo_set_features = bnx2x_set_features, 11180 .ndo_tx_timeout = bnx2x_tx_timeout, 11181 #ifdef CONFIG_NET_POLL_CONTROLLER 11182 .ndo_poll_controller = poll_bnx2x, 11183 #endif 11184 .ndo_setup_tc = bnx2x_setup_tc, 11185 11186 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 11187 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 11188 #endif 11189 }; 11190 11191 static int bnx2x_set_coherency_mask(struct bnx2x *bp) 11192 { 11193 struct device *dev = &bp->pdev->dev; 11194 11195 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 11196 bp->flags |= USING_DAC_FLAG; 11197 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 11198 dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); 11199 return -EIO; 11200 } 11201 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 11202 dev_err(dev, "System does not support DMA, 
aborting\n"); 11203 return -EIO; 11204 } 11205 11206 return 0; 11207 } 11208 11209 static int __devinit bnx2x_init_dev(struct pci_dev *pdev, 11210 struct net_device *dev, 11211 unsigned long board_type) 11212 { 11213 struct bnx2x *bp; 11214 int rc; 11215 u32 pci_cfg_dword; 11216 bool chip_is_e1x = (board_type == BCM57710 || 11217 board_type == BCM57711 || 11218 board_type == BCM57711E); 11219 11220 SET_NETDEV_DEV(dev, &pdev->dev); 11221 bp = netdev_priv(dev); 11222 11223 bp->dev = dev; 11224 bp->pdev = pdev; 11225 bp->flags = 0; 11226 11227 rc = pci_enable_device(pdev); 11228 if (rc) { 11229 dev_err(&bp->pdev->dev, 11230 "Cannot enable PCI device, aborting\n"); 11231 goto err_out; 11232 } 11233 11234 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 11235 dev_err(&bp->pdev->dev, 11236 "Cannot find PCI device base address, aborting\n"); 11237 rc = -ENODEV; 11238 goto err_out_disable; 11239 } 11240 11241 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 11242 dev_err(&bp->pdev->dev, "Cannot find second PCI device" 11243 " base address, aborting\n"); 11244 rc = -ENODEV; 11245 goto err_out_disable; 11246 } 11247 11248 if (atomic_read(&pdev->enable_cnt) == 1) { 11249 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 11250 if (rc) { 11251 dev_err(&bp->pdev->dev, 11252 "Cannot obtain PCI resources, aborting\n"); 11253 goto err_out_disable; 11254 } 11255 11256 pci_set_master(pdev); 11257 pci_save_state(pdev); 11258 } 11259 11260 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 11261 if (bp->pm_cap == 0) { 11262 dev_err(&bp->pdev->dev, 11263 "Cannot find power management capability, aborting\n"); 11264 rc = -EIO; 11265 goto err_out_release; 11266 } 11267 11268 if (!pci_is_pcie(pdev)) { 11269 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); 11270 rc = -EIO; 11271 goto err_out_release; 11272 } 11273 11274 rc = bnx2x_set_coherency_mask(bp); 11275 if (rc) 11276 goto err_out_release; 11277 11278 dev->mem_start = pci_resource_start(pdev, 0); 11279 dev->base_addr = dev->mem_start; 11280 dev->mem_end = pci_resource_end(pdev, 0); 11281 11282 dev->irq = pdev->irq; 11283 11284 bp->regview = pci_ioremap_bar(pdev, 0); 11285 if (!bp->regview) { 11286 dev_err(&bp->pdev->dev, 11287 "Cannot map register space, aborting\n"); 11288 rc = -ENOMEM; 11289 goto err_out_release; 11290 } 11291 11292 /* In E1/E1H use the PCI device function given by the kernel. 11293 * In E2/E3 read the physical function from the ME register since these 11294 * chips support Physical Device Assignment where the kernel BDF may be 11295 * arbitrary (depending on the hypervisor). 11296 */ 11297 if (chip_is_e1x) { 11298 bp->pf_num = PCI_FUNC(pdev->devfn); 11299 } else { /* chip is E2/3 */ 11300 pci_read_config_dword(bp->pdev, 11301 PCICFG_ME_REGISTER, &pci_cfg_dword); 11302 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> 11303 ME_REG_ABS_PF_NUM_SHIFT); 11304 } 11305 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); 11306 11307 bnx2x_set_power_state(bp, PCI_D0); 11308 11309 /* clean indirect addresses */ 11310 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 11311 PCICFG_VENDOR_ID_OFFSET); 11312 /* 11313 * Clean the following indirect addresses for all functions since they 11314 * are not used by the driver.
11315 */ 11316 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 11317 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 11318 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 11319 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 11320 11321 if (chip_is_e1x) { 11322 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 11323 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 11324 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 11325 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 11326 } 11327 11328 /* 11329 * Enable internal target-read (in case we are probed after PF FLR). 11330 * Must be done prior to any BAR read access. Only for 57712 and up 11331 */ 11332 if (!chip_is_e1x) 11333 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 11334 11335 /* Reset the load counter */ 11336 bnx2x_clear_load_status(bp); 11337 11338 dev->watchdog_timeo = TX_TIMEOUT; 11339 11340 dev->netdev_ops = &bnx2x_netdev_ops; 11341 bnx2x_set_ethtool_ops(dev); 11342 11343 dev->priv_flags |= IFF_UNICAST_FLT; 11344 11345 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 11346 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 11347 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 11348 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; 11349 11350 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 11351 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 11352 11353 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; 11354 if (bp->flags & USING_DAC_FLAG) 11355 dev->features |= NETIF_F_HIGHDMA; 11356 11357 /* Add Loopback capability to the device */ 11358 dev->hw_features |= NETIF_F_LOOPBACK; 11359 11360 #ifdef BCM_DCBNL 11361 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 11362 #endif 11363 11364 /* get_port_hwinfo() will set prtad and mmds properly */ 11365 bp->mdio.prtad = MDIO_PRTAD_NONE; 11366 bp->mdio.mmds = 0; 11367 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 11368 bp->mdio.dev = dev; 11369 bp->mdio.mdio_read = bnx2x_mdio_read; 11370 bp->mdio.mdio_write = bnx2x_mdio_write; 11371 11372 return 0; 11373 11374 err_out_release: 11375 if (atomic_read(&pdev->enable_cnt) == 1) 11376 pci_release_regions(pdev); 11377 11378 err_out_disable: 11379 pci_disable_device(pdev); 11380 pci_set_drvdata(pdev, NULL); 11381 11382 err_out: 11383 return rc; 11384 } 11385 11386 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, 11387 int *width, int *speed) 11388 { 11389 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); 11390 11391 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; 11392 11393 /* return value of 1=2.5GHz 2=5GHz */ 11394 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; 11395 } 11396 11397 static int bnx2x_check_firmware(struct bnx2x *bp) 11398 { 11399 const struct firmware *firmware = bp->firmware; 11400 struct bnx2x_fw_file_hdr *fw_hdr; 11401 struct bnx2x_fw_file_section *sections; 11402 u32 offset, len, num_ops; 11403 u16 *ops_offsets; 11404 int i; 11405 const u8 *fw_ver; 11406 11407 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { 11408 BNX2X_ERR("Wrong FW size\n"); 11409 return -EINVAL; 11410 } 11411 11412 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; 11413 sections = (struct bnx2x_fw_file_section *)fw_hdr; 11414 11415 /* Make sure none of the offsets and sizes make us read beyond 11416 * the end of the firmware data */ 11417 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { 11418 offset = be32_to_cpu(sections[i].offset); 11419 len = be32_to_cpu(sections[i].len); 11420 if (offset + len > firmware->size) { 11421 BNX2X_ERR("Section %d length is out of bounds\n", i); 
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
		BNX2X_ERR("Wrong FW size\n");
		return -EINVAL;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			BNX2X_ERR("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			BNX2X_ERR("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			  fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			  BCM_5710_FW_MAJOR_VERSION,
			  BCM_5710_FW_MINOR_VERSION,
			  BCM_5710_FW_REVISION_VERSION,
			  BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/*
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr)							\
		goto lbl;						\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
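
/*
 * Request the chip-specific firmware file, validate it and build
 * host-endian copies of the init data, opcode, offset and IRO arrays that
 * the firmware blob stores big-endian.
 */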
static int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (bp->firmware)
		return 0;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (!CHIP_IS_E1x(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}
	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);
	bp->firmware = NULL;

	return rc;
}

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
	bp->firmware = NULL;
}

static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn = bnx2x_init_hw_common,
	.init_hw_port = bnx2x_init_hw_port,
	.init_hw_func = bnx2x_init_hw_func,

	.reset_hw_cmn = bnx2x_reset_common,
	.reset_hw_port = bnx2x_reset_port,
	.reset_hw_func = bnx2x_reset_func,

	.gunzip_init = bnx2x_gunzip_init,
	.gunzip_end = bnx2x_gunzip_end,

	.init_fw = bnx2x_init_firmware,
	.release_fw = bnx2x_release_firmware,
};
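
/*
 * Bind the common function state object to this driver's HW init/reset,
 * gunzip and firmware callbacks (bnx2x_func_sp_drv above).
 */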
void bnx2x__init_func_obj(struct bnx2x *bp)
{
	/* Prepare DMAE related driver resources */
	bnx2x_setup_dmae(bp);

	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    bnx2x_sp(bp, func_afex_rdata),
			    bnx2x_sp_mapping(bp, func_afex_rdata),
			    &bnx2x_func_sp_drv);
}

/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
	int cid_count = BNX2X_L2_CID_COUNT(bp);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev: pci device
 *
 */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

	/*
	 * If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */
	if (!pos)
		return 1 + CNIC_PRESENT;

	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all SBs
	 * without the default SB.
	 */
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
	return control & PCI_MSIX_FLAGS_QSIZE;
}
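
/*
 * PCI probe entry point: size the netdev Tx/Rx queue arrays from the
 * number of non-default status blocks and the per-chip CoS estimate,
 * then map the device and register it with the network stack.
 */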
11746 */ 11747 max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); 11748 11749 WARN_ON(!max_non_def_sbs); 11750 11751 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 11752 rss_count = max_non_def_sbs - CNIC_PRESENT; 11753 11754 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ 11755 rx_count = rss_count + FCOE_PRESENT; 11756 11757 /* 11758 * Maximum number of netdev Tx queues: 11759 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 11760 */ 11761 tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; 11762 11763 /* dev zeroed in init_etherdev */ 11764 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 11765 if (!dev) 11766 return -ENOMEM; 11767 11768 bp = netdev_priv(dev); 11769 11770 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", 11771 tx_count, rx_count); 11772 11773 bp->igu_sb_cnt = max_non_def_sbs; 11774 bp->msg_enable = debug; 11775 pci_set_drvdata(pdev, dev); 11776 11777 rc = bnx2x_init_dev(pdev, dev, ent->driver_data); 11778 if (rc < 0) { 11779 free_netdev(dev); 11780 return rc; 11781 } 11782 11783 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); 11784 11785 rc = bnx2x_init_bp(bp); 11786 if (rc) 11787 goto init_one_exit; 11788 11789 /* 11790 * Map doorbels here as we need the real value of bp->max_cos which 11791 * is initialized in bnx2x_init_bp(). 11792 */ 11793 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 11794 min_t(u64, BNX2X_DB_SIZE(bp), 11795 pci_resource_len(pdev, 2))); 11796 if (!bp->doorbells) { 11797 dev_err(&bp->pdev->dev, 11798 "Cannot map doorbell space, aborting\n"); 11799 rc = -ENOMEM; 11800 goto init_one_exit; 11801 } 11802 11803 /* calc qm_cid_count */ 11804 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); 11805 11806 #ifdef BCM_CNIC 11807 /* disable FCOE L2 queue for E1x */ 11808 if (CHIP_IS_E1x(bp)) 11809 bp->flags |= NO_FCOE_FLAG; 11810 11811 #endif 11812 11813 /* Configure interrupt mode: try to enable MSI-X/MSI if 11814 * needed, set bp->num_queues appropriately. 11815 */ 11816 bnx2x_set_int_mode(bp); 11817 11818 /* Add all NAPI objects */ 11819 bnx2x_add_all_napi(bp); 11820 11821 rc = register_netdev(dev); 11822 if (rc) { 11823 dev_err(&pdev->dev, "Cannot register net device\n"); 11824 goto init_one_exit; 11825 } 11826 11827 #ifdef BCM_CNIC 11828 if (!NO_FCOE(bp)) { 11829 /* Add storage MAC address */ 11830 rtnl_lock(); 11831 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 11832 rtnl_unlock(); 11833 } 11834 #endif 11835 11836 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 11837 11838 BNX2X_DEV_INFO( 11839 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 11840 board_info[ent->driver_data].name, 11841 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 11842 pcie_width, 11843 ((!CHIP_IS_E2(bp) && pcie_speed == 2) || 11844 (CHIP_IS_E2(bp) && pcie_speed == 1)) ? 
11845 "5GHz (Gen2)" : "2.5GHz", 11846 dev->base_addr, bp->pdev->irq, dev->dev_addr); 11847 11848 return 0; 11849 11850 init_one_exit: 11851 if (bp->regview) 11852 iounmap(bp->regview); 11853 11854 if (bp->doorbells) 11855 iounmap(bp->doorbells); 11856 11857 free_netdev(dev); 11858 11859 if (atomic_read(&pdev->enable_cnt) == 1) 11860 pci_release_regions(pdev); 11861 11862 pci_disable_device(pdev); 11863 pci_set_drvdata(pdev, NULL); 11864 11865 return rc; 11866 } 11867 11868 static void __devexit bnx2x_remove_one(struct pci_dev *pdev) 11869 { 11870 struct net_device *dev = pci_get_drvdata(pdev); 11871 struct bnx2x *bp; 11872 11873 if (!dev) { 11874 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 11875 return; 11876 } 11877 bp = netdev_priv(dev); 11878 11879 #ifdef BCM_CNIC 11880 /* Delete storage MAC address */ 11881 if (!NO_FCOE(bp)) { 11882 rtnl_lock(); 11883 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 11884 rtnl_unlock(); 11885 } 11886 #endif 11887 11888 #ifdef BCM_DCBNL 11889 /* Delete app tlvs from dcbnl */ 11890 bnx2x_dcbnl_update_applist(bp, true); 11891 #endif 11892 11893 unregister_netdev(dev); 11894 11895 /* Delete all NAPI objects */ 11896 bnx2x_del_all_napi(bp); 11897 11898 /* Power on: we can't let PCI layer write to us while we are in D3 */ 11899 bnx2x_set_power_state(bp, PCI_D0); 11900 11901 /* Disable MSI/MSI-X */ 11902 bnx2x_disable_msi(bp); 11903 11904 /* Power off */ 11905 bnx2x_set_power_state(bp, PCI_D3hot); 11906 11907 /* Make sure RESET task is not scheduled before continuing */ 11908 cancel_delayed_work_sync(&bp->sp_rtnl_task); 11909 11910 if (bp->regview) 11911 iounmap(bp->regview); 11912 11913 if (bp->doorbells) 11914 iounmap(bp->doorbells); 11915 11916 bnx2x_release_firmware(bp); 11917 11918 bnx2x_free_mem_bp(bp); 11919 11920 free_netdev(dev); 11921 11922 if (atomic_read(&pdev->enable_cnt) == 1) 11923 pci_release_regions(pdev); 11924 11925 pci_disable_device(pdev); 11926 pci_set_drvdata(pdev, NULL); 11927 } 11928 11929 static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 11930 { 11931 int i; 11932 11933 bp->state = BNX2X_STATE_ERROR; 11934 11935 bp->rx_mode = BNX2X_RX_MODE_NONE; 11936 11937 #ifdef BCM_CNIC 11938 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 11939 #endif 11940 /* Stop Tx */ 11941 bnx2x_tx_disable(bp); 11942 11943 bnx2x_netif_stop(bp, 0); 11944 11945 del_timer_sync(&bp->timer); 11946 11947 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 11948 11949 /* Release IRQs */ 11950 bnx2x_free_irq(bp); 11951 11952 /* Free SKBs, SGEs, TPA pool and driver internals */ 11953 bnx2x_free_skbs(bp); 11954 11955 for_each_rx_queue(bp, i) 11956 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 11957 11958 bnx2x_free_mem(bp); 11959 11960 bp->state = BNX2X_STATE_CLOSED; 11961 11962 netif_carrier_off(bp->dev); 11963 11964 return 0; 11965 } 11966 11967 static void bnx2x_eeh_recover(struct bnx2x *bp) 11968 { 11969 u32 val; 11970 11971 mutex_init(&bp->port.phy_mutex); 11972 11973 11974 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 11975 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 11976 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 11977 BNX2X_ERR("BAD MCP validity signature\n"); 11978 } 11979 11980 /** 11981 * bnx2x_io_error_detected - called when PCI error is detected 11982 * @pdev: Pointer to PCI device 11983 * @state: The current pci connection state 11984 * 11985 * This function is called after a PCI bus error affecting 11986 * this device has been detected. 
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	struct list_head *pos, *q;

	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);

	/* Free globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
		struct bnx2x_prev_path_list *tmp =
			list_entry(pos, struct bnx2x_prev_path_list, list);
		list_del(pos);
		kfree(tmp);
	}
}

void bnx2x_notify_link_changed(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}
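
/*
 * The driver forwards CNIC kernel work queue entries (KWQEs) to the HW
 * slow path queue while respecting per-type credits: cq_spq_left for L2
 * SPEs, eq_spq_left for COMMON SPEs and max_kwqe_pending for L5 ones.
 * bnx2x_cnic_sp_post() below drains the KWQ after 'count' new completions
 * have returned that much credit.
 */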
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;
		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(bp, &bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					BNX2X_ISCSI_ETH_CID);
		}

		/*
		 * There may be not more than 8 L2 and not more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
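
/*
 * cnic_eth_dev->drv_submit_kwqes_16 entry: queue up to 'count' KWQEs on
 * the driver's KWQ ring and kick bnx2x_cnic_sp_post(); returns how many
 * entries were actually accepted.
 */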
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post to SP queue while panic\n");
		return -EIO;
	}
#endif

	if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
	    (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

/* Call the CNIC control handler from process context (cnic_mutex held) */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_mutex));
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

/* Same as above, but uses RCU instead of the mutex so it may run in BH */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}
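
/* Tell CNIC about a completed CFC delete for 'cid' first, then re-run the
 * KWQ drain so the freed slow path queue credit can be used.
 */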
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
	struct cnic_ctl_info ctl = {0};

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
	ctl.data.comp.error = err;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
{
	unsigned long accept_flags = 0, ramrod_flags = 0;
	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;

	if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets - the leading one
		 * in our case).
		 */
		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

		/* Clear STOP_PENDING bit if START is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);

		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
	} else
		/* Clear START_PENDING bit if STOP is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);

	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(sched_state, &bp->sp_state);
	else {
		__set_bit(RAMROD_RX, &ramrod_flags);
		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
				    ramrod_flags);
	}
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
		unsigned long sp_bits = 0;

		/* Configure the iSCSI classification object */
		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
				   cp->iscsi_l2_client_id,
				   cp->iscsi_l2_cid, BP_FUNC(bp),
				   bnx2x_sp(bp, mac_rdata),
				   bnx2x_sp_mapping(bp, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
				   &bp->macs_pool);

		/* Set iSCSI MAC address */
		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
		if (rc)
			break;

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring */

		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		break;
	}
	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		unsigned long sp_bits = 0;

		/* Stop accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, false);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
					BNX2X_ISCSI_ETH_MAC, true);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}
	case DRV_CTL_ULP_REGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		break;
	}
	case DRV_CTL_ULP_UNREGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
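
/*
 * cnic_eth_dev->drv_register_cnic entry: allocate one page for the KWQ
 * ring, export the IRQ/status block layout and publish the CNIC ops
 * pointer via RCU.
 */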
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate to CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */