/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
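/* The FW_FILE_NAME_* strings below expand to names of the form
 * "bnx2x/bnx2x-e1-<major>.<minor>.<rev>.<eng>.fw"; request_firmware()
 * resolves them against the installed firmware tree at load time.
 */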
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);


int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

#define INT_MODE_INTx		1
#define INT_MODE_MSI		2
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");



struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57800,
	BCM57800_MF,
	BCM57810,
	BCM57810_MF,
	BCM57840,
	BCM57840_MF,
	BCM57811,
	BCM57811_MF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
};
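/* Some of these PCI device IDs may be missing from older <linux/pci_ids.h>
 * headers; fall back to the matching CHIP_NUM_* values so the device table
 * below always compiles.
 */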
#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840
#define PCI_DEVICE_ID_NX2_57840		CHIP_NUM_57840
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
/****************************************************************************
* General service functions
****************************************************************************/

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}
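/* Mark this function as enabled/disabled in the internal memory of each of
 * the four storm processors (X/C/T/U).
 */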
static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"


/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}
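/* Compose a DMAE command opcode from the source/destination types, the port
 * and VN of this function, reset and endianity flags; optionally add a
 * completion destination of the given comp_type.
 */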
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	/*
	 * Lock the dmae channel. Disable BHs to prevent a deadlock,
	 * since this code is called both from syscall context and
	 * from the ndo_set_rx_mode() flow that may run in BH context.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
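/* Read len32 dwords from GRC address src_addr into the slowpath wb_data
 * scratch buffer, falling back to plain register reads while DMAE is not
 * yet ready.
 */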
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}
	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x800;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
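/* Dump driver and FW state (status blocks, rings, storm asserts and the MCP
 * trace) to the log on a fatal error; statistics are disabled first so the
 * dump itself is not disturbed.
 */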
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
					   char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}
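/* Emulation and FPGA platforms run much slower than real silicon, so the
 * FLR poll budget is scaled up accordingly below.
 */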
static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);


	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)


static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
				  u32 poll_cnt)
{
	struct sdm_op_gen op_gen = {0};

	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		ret = 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}
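/* Check the PCIe Device Status register for pending non-posted transactions;
 * used after FLR cleanup to verify that the function has quiesced.
 */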
static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	int pos;
	u16 status;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return false;

	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines
 */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{

	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;


	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for the DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}
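/* Final per-PF FLR cleanup: re-enable target reads, wait for the HW usage
 * counters to drop to zero, issue the FW final cleanup command, verify the
 * TX path is flushed and that no PCIe transactions are left pending, then
 * re-enable master access.
 */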
static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
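/* Counterpart of bnx2x_hc_int_enable() for chips whose IGU block replaces
 * the HC: program IGU_REG_PF_CONFIGURATION according to the interrupt mode
 * in use (MSI-X, single MSI-X, MSI or INTx).
 */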
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only the PCI configuration space to disable the
	 * MSI/MSI-X capability.
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}
/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp:	driver handle
 *
 * Tries to acquire a leader lock for the current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
#endif
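/* Handle a ramrod (slowpath) completion reported on a fastpath ring:
 * translate the FW command into the matching queue state-machine event,
 * then return the consumed slot to cq_spq_left.
 */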
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit and prevent the case where both
		 * bits are cleared. At the end of load/unload the driver
		 * checks that sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule workqueue to send ack to MCP */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}

	return;
}

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
{
	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;

	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
				 start);
}
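/* INTx/MSI interrupt handler: ack the interrupt status, schedule NAPI for
 * every fastpath whose status bit is set, hand the CNIC bits to the CNIC
 * driver and kick the slowpath task for the default status block.
 */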
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_PRESENT);
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		if (likely(bp->state == BNX2X_STATE_OPEN)) {
			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops)
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();
		}

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
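/* GPIO helpers: a pin may belong to the other port when the NIG port-swap
 * strap and override are both set, hence the gpio_port/gpio_shift
 * computation in each helper below.
 */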
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
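/* Usage sketch (hypothetical caller): drive GPIO 1 of the current port low
 * and later release it back to input:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 *	...
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, BP_PORT(bp));
 */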
*/ 1933 1934 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1935 /* read GPIO and mask except the float bits */ 1936 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 1937 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 1938 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 1939 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 1940 1941 switch (mode) { 1942 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 1943 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); 1944 /* set CLR */ 1945 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 1946 break; 1947 1948 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 1949 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); 1950 /* set SET */ 1951 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 1952 break; 1953 1954 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 1955 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); 1956 /* set FLOAT */ 1957 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 1958 break; 1959 1960 default: 1961 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); 1962 rc = -EINVAL; 1963 break; 1964 } 1965 1966 if (rc == 0) 1967 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 1968 1969 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1970 1971 return rc; 1972 } 1973 1974 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 1975 { 1976 /* The GPIO should be swapped if swap register is set and active */ 1977 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 1978 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 1979 int gpio_shift = gpio_num + 1980 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 1981 u32 gpio_mask = (1 << gpio_shift); 1982 u32 gpio_reg; 1983 1984 if (gpio_num > MISC_REGISTERS_GPIO_3) { 1985 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 1986 return -EINVAL; 1987 } 1988 1989 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1990 /* read GPIO int */ 1991 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); 1992 1993 switch (mode) { 1994 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 1995 DP(NETIF_MSG_LINK, 1996 "Clear GPIO INT %d (shift %d) -> output low\n", 1997 gpio_num, gpio_shift); 1998 /* clear SET and set CLR */ 1999 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2000 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2001 break; 2002 2003 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2004 DP(NETIF_MSG_LINK, 2005 "Set GPIO INT %d (shift %d) -> output high\n", 2006 gpio_num, gpio_shift); 2007 /* clear CLR and set SET */ 2008 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2009 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2010 break; 2011 2012 default: 2013 break; 2014 } 2015 2016 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); 2017 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2018 2019 return 0; 2020 } 2021 2022 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) 2023 { 2024 u32 spio_mask = (1 << spio_num); 2025 u32 spio_reg; 2026 2027 if ((spio_num < MISC_REGISTERS_SPIO_4) || 2028 (spio_num > MISC_REGISTERS_SPIO_7)) { 2029 BNX2X_ERR("Invalid SPIO %d\n", spio_num); 2030 return -EINVAL; 2031 } 2032 2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2034 /* read SPIO and mask except the float bits */ 2035 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); 2036 2037 switch (mode) { 2038 case MISC_REGISTERS_SPIO_OUTPUT_LOW: 2039 DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num); 2040 /* clear FLOAT and set CLR */ 2041 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2042 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); 2043 break; 2044 2045 
case MISC_REGISTERS_SPIO_OUTPUT_HIGH: 2046 DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num); 2047 /* clear FLOAT and set SET */ 2048 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2049 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); 2050 break; 2051 2052 case MISC_REGISTERS_SPIO_INPUT_HI_Z: 2053 DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num); 2054 /* set FLOAT */ 2055 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2056 break; 2057 2058 default: 2059 break; 2060 } 2061 2062 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2063 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2064 2065 return 0; 2066 } 2067 2068 void bnx2x_calc_fc_adv(struct bnx2x *bp) 2069 { 2070 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2071 switch (bp->link_vars.ieee_fc & 2072 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2073 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 2074 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2075 ADVERTISED_Pause); 2076 break; 2077 2078 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2079 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2080 ADVERTISED_Pause); 2081 break; 2082 2083 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2084 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2085 break; 2086 2087 default: 2088 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2089 ADVERTISED_Pause); 2090 break; 2091 } 2092 } 2093 2094 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2095 { 2096 if (!BP_NOMCP(bp)) { 2097 u8 rc; 2098 int cfx_idx = bnx2x_get_link_cfg_idx(bp); 2099 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2100 /* 2101 * Initialize link parameters structure variables 2102 * It is recommended to turn off RX FC for jumbo frames 2103 * for better performance 2104 */ 2105 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2106 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2107 else 2108 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2109 2110 bnx2x_acquire_phy_lock(bp); 2111 2112 if (load_mode == LOAD_DIAG) { 2113 struct link_params *lp = &bp->link_params; 2114 lp->loopback_mode = LOOPBACK_XGXS; 2115 /* do PHY loopback at 10G speed, if possible */ 2116 if (lp->req_line_speed[cfx_idx] < SPEED_10000) { 2117 if (lp->speed_cap_mask[cfx_idx] & 2118 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2119 lp->req_line_speed[cfx_idx] = 2120 SPEED_10000; 2121 else 2122 lp->req_line_speed[cfx_idx] = 2123 SPEED_1000; 2124 } 2125 } 2126 2127 if (load_mode == LOAD_LOOPBACK_EXT) { 2128 struct link_params *lp = &bp->link_params; 2129 lp->loopback_mode = LOOPBACK_EXT; 2130 } 2131 2132 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2133 2134 bnx2x_release_phy_lock(bp); 2135 2136 bnx2x_calc_fc_adv(bp); 2137 2138 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { 2139 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2140 bnx2x_link_report(bp); 2141 } else 2142 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2143 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2144 return rc; 2145 } 2146 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2147 return -EINVAL; 2148 } 2149 2150 void bnx2x_link_set(struct bnx2x *bp) 2151 { 2152 if (!BP_NOMCP(bp)) { 2153 bnx2x_acquire_phy_lock(bp); 2154 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2155 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2156 bnx2x_release_phy_lock(bp); 2157 2158 bnx2x_calc_fc_adv(bp); 2159 } else 2160 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2161 } 2162 2163 static void bnx2x__link_reset(struct 
bnx2x *bp)
2164 {
2165 	if (!BP_NOMCP(bp)) {
2166 		bnx2x_acquire_phy_lock(bp);
2167 		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2168 		bnx2x_release_phy_lock(bp);
2169 	} else
2170 		BNX2X_ERR("Bootcode is missing - can not reset link\n");
2171 }
2172 
2173 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2174 {
2175 	u8 rc = 0;
2176 
2177 	if (!BP_NOMCP(bp)) {
2178 		bnx2x_acquire_phy_lock(bp);
2179 		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2180 				     is_serdes);
2181 		bnx2x_release_phy_lock(bp);
2182 	} else
2183 		BNX2X_ERR("Bootcode is missing - can not test link\n");
2184 
2185 	return rc;
2186 }
2187 
2188 
2189 /* Calculates the sum of vn_min_rates.
2190    It's needed for further normalizing of the min_rates.
2191    Returns:
2192      sum of vn_min_rates.
2193        or
2194      0 - if all the min_rates are 0.
2195    In the latter case the fairness algorithm should be deactivated.
2196    If not all min_rates are zero then those that are zeroes will be set to 1.
2197  */
2198 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2199 				      struct cmng_init_input *input)
2200 {
2201 	int all_zero = 1;
2202 	int vn;
2203 
2204 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2205 		u32 vn_cfg = bp->mf_config[vn];
2206 		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2207 				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2208 
2209 		/* Skip hidden vns */
2210 		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2211 			vn_min_rate = 0;
2212 		/* If min rate is zero - set it to 1 */
2213 		else if (!vn_min_rate)
2214 			vn_min_rate = DEF_MIN_RATE;
2215 		else
2216 			all_zero = 0;
2217 
2218 		input->vnic_min_rate[vn] = vn_min_rate;
2219 	}
2220 
2221 	/* if ETS or all min rates are zeros - disable fairness */
2222 	if (BNX2X_IS_ETS_ENABLED(bp)) {
2223 		input->flags.cmng_enables &=
2224 					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2225 		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2226 	} else if (all_zero) {
2227 		input->flags.cmng_enables &=
2228 					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2229 		DP(NETIF_MSG_IFUP,
2230 		   "All MIN values are zeroes, fairness will be disabled\n");
2231 	} else
2232 		input->flags.cmng_enables |=
2233 					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2234 }
2235 
2236 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2237 				    struct cmng_init_input *input)
2238 {
2239 	u16 vn_max_rate;
2240 	u32 vn_cfg = bp->mf_config[vn];
2241 
2242 	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2243 		vn_max_rate = 0;
2244 	else {
2245 		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2246 
2247 		if (IS_MF_SI(bp)) {
2248 			/* maxCfg in percent of link speed */
2249 			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2250 		} else /* SD modes */
2251 			/* maxCfg is absolute in 100Mb units */
2252 			vn_max_rate = maxCfg * 100;
2253 	}
2254 
2255 	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2256 
2257 	input->vnic_max_rate[vn] = vn_max_rate;
2258 }
2259 
2260 
2261 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2262 {
2263 	if (CHIP_REV_IS_SLOW(bp))
2264 		return CMNG_FNS_NONE;
2265 	if (IS_MF(bp))
2266 		return CMNG_FNS_MINMAX;
2267 
2268 	return CMNG_FNS_NONE;
2269 }
2270 
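/* Editor's note - worked example (values assumed, not taken from any board):
 * bnx2x_calc_vn_min() scales the configured min_bw by 100, so min_bw = 25
 * yields vn_min_rate = 25 * 100 = 2500. bnx2x_calc_vn_max() treats max_cfg
 * as absolute 100Mb units in SD mode (max_cfg = 40 -> 4000), but as a
 * percentage of the line rate in SI mode (40% of line_speed 10000 -> 4000).
 *
 * The illustrative helper below (not part of the original driver) shows how
 * the two helpers combine to fill a cmng_init_input; it mirrors, in a
 * stripped-down form, what bnx2x_cmng_fns_init() does further down.
 */
static inline void bnx2x_example_fill_cmng_input(struct bnx2x *bp,
						 struct cmng_init_input *input)
{
	int vn;

	/* normalize min rates; fairness is disabled when all are zero
	 * or when ETS is enabled
	 */
	bnx2x_calc_vn_min(bp, input);

	/* per-vn max rate, absolute (SD) or relative to line rate (SI) */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
		bnx2x_calc_vn_max(bp, vn, input);
}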
2271 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2272 {
2273 	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2274 
2275 	if (BP_NOMCP(bp))
2276 		return; /* what should be the default value in this case */
2277 
2278 	/* For 2 port configuration the absolute function number formula
2279 	 * is:
2280 	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
2281 	 *
2282 	 *      and there are 4 functions per port
2283 	 *
2284 	 * For 4 port configuration it is
2285 	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2286 	 *
2287 	 *      and there are 2 functions per port
2288 	 */
2289 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2290 		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2291 
2292 		if (func >= E1H_FUNC_MAX)
2293 			break;
2294 
2295 		bp->mf_config[vn] =
2296 			MF_CFG_RD(bp, func_mf_config[func].config);
2297 	}
2298 	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2299 		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2300 		bp->flags |= MF_FUNC_DIS;
2301 	} else {
2302 		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2303 		bp->flags &= ~MF_FUNC_DIS;
2304 	}
2305 }
2306 
2307 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2308 {
2309 	struct cmng_init_input input;
2310 	memset(&input, 0, sizeof(struct cmng_init_input));
2311 
2312 	input.port_rate = bp->link_vars.line_speed;
2313 
2314 	if (cmng_type == CMNG_FNS_MINMAX) {
2315 		int vn;
2316 
2317 		/* read mf conf from shmem */
2318 		if (read_cfg)
2319 			bnx2x_read_mf_cfg(bp);
2320 
2321 		/* vn_weight_sum and enable fairness if not 0 */
2322 		bnx2x_calc_vn_min(bp, &input);
2323 
2324 		/* calculate and set min-max rate for each vn */
2325 		if (bp->port.pmf)
2326 			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2327 				bnx2x_calc_vn_max(bp, vn, &input);
2328 
2329 		/* always enable rate shaping and fairness */
2330 		input.flags.cmng_enables |=
2331 					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2332 
2333 		bnx2x_init_cmng(&input, &bp->cmng);
2334 		return;
2335 	}
2336 
2337 	/* rate shaping and fairness are disabled */
2338 	DP(NETIF_MSG_IFUP,
2339 	   "rate shaping and fairness are disabled\n");
2340 }
2341 
2342 static void storm_memset_cmng(struct bnx2x *bp,
2343 			      struct cmng_init *cmng,
2344 			      u8 port)
2345 {
2346 	int vn;
2347 	size_t size = sizeof(struct cmng_struct_per_port);
2348 
2349 	u32 addr = BAR_XSTRORM_INTMEM +
2350 			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2351 
2352 	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2353 
2354 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2355 		int func = func_by_vn(bp, vn);
2356 
2357 		addr = BAR_XSTRORM_INTMEM +
2358 		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2359 		size = sizeof(struct rate_shaping_vars_per_vn);
2360 		__storm_memset_struct(bp, addr, size,
2361 				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2362 
2363 		addr = BAR_XSTRORM_INTMEM +
2364 		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2365 		size = sizeof(struct fairness_vars_per_vn);
2366 		__storm_memset_struct(bp, addr, size,
2367 				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2368 	}
2369 }
2370 
2371 /* This function is called upon link interrupt */
2372 static void bnx2x_link_attn(struct bnx2x *bp)
2373 {
2374 	/* Make sure that we are synced with the current statistics */
2375 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2376 
2377 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
2378 
2379 	if (bp->link_vars.link_up) {
2380 
2381 		/* dropless flow control */
2382 		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2383 			int port = BP_PORT(bp);
2384 			u32 pause_enabled = 0;
2385 
2386 			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2387 				pause_enabled = 1;
2388 
2389 			REG_WR(bp, BAR_USTRORM_INTMEM +
2390 			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2391 			       pause_enabled);
2392 		}
2393 
2394 		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2395 			struct host_port_stats *pstats;
2396 
2397 			pstats = bnx2x_sp(bp, port_stats);
2398 			/* reset old mac stats */
2399 			memset(&(pstats->mac_stx[0]), 0,
2400 			       sizeof(struct mac_stx));
2401 		}
2402 		if (bp->state == BNX2X_STATE_OPEN)
2403 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2404 	}
2405 
2406 	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2407 		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2408 
2409 		if (cmng_fns != CMNG_FNS_NONE) {
2410 			bnx2x_cmng_fns_init(bp, false, cmng_fns);
2411 			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2412 		} else
2413 			/* rate shaping and fairness are disabled */
2414 			DP(NETIF_MSG_IFUP,
2415 			   "single function mode without fairness\n");
2416 	}
2417 
2418 	__bnx2x_link_report(bp);
2419 
2420 	if (IS_MF(bp))
2421 		bnx2x_link_sync_notify(bp);
2422 }
2423 
2424 void bnx2x__link_status_update(struct bnx2x *bp)
2425 {
2426 	if (bp->state != BNX2X_STATE_OPEN)
2427 		return;
2428 
2429 	/* read updated dcb configuration */
2430 	bnx2x_dcbx_pmf_update(bp);
2431 
2432 	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2433 
2434 	if (bp->link_vars.link_up)
2435 		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2436 	else
2437 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2438 
2439 	/* indicate link status */
2440 	bnx2x_link_report(bp);
2441 }
2442 
2443 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2444 				  u16 vlan_val, u8 allowed_prio)
2445 {
2446 	struct bnx2x_func_state_params func_params = {0};
2447 	struct bnx2x_func_afex_update_params *f_update_params =
2448 		&func_params.params.afex_update;
2449 
2450 	func_params.f_obj = &bp->func_obj;
2451 	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2452 
2453 	/* no need to wait for RAMROD completion, so don't
2454 	 * set RAMROD_COMP_WAIT flag
2455 	 */
2456 
2457 	f_update_params->vif_id = vifid;
2458 	f_update_params->afex_default_vlan = vlan_val;
2459 	f_update_params->allowed_priorities = allowed_prio;
2460 
2461 	/* if the ramrod cannot be sent, respond to the MCP immediately */
2462 	if (bnx2x_func_state_change(bp, &func_params) < 0)
2463 		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2464 
2465 	return 0;
2466 }
2467 
2468 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2469 					  u16 vif_index, u8 func_bit_map)
2470 {
2471 	struct bnx2x_func_state_params func_params = {0};
2472 	struct bnx2x_func_afex_viflists_params *update_params =
2473 		&func_params.params.afex_viflists;
2474 	int rc;
2475 	u32 drv_msg_code;
2476 
2477 	/* validate only LIST_SET and LIST_GET are received from switch */
2478 	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2479 		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2480 			  cmd_type);
2481 
2482 	func_params.f_obj = &bp->func_obj;
2483 	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2484 
2485 	/* set parameters according to cmd_type */
2486 	update_params->afex_vif_list_command = cmd_type;
2487 	update_params->vif_list_index = cpu_to_le16(vif_index);
2488 	update_params->func_bit_map =
2489 		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2490 	update_params->func_to_clear = 0;
2491 	drv_msg_code =
2492 		(cmd_type == VIF_LIST_RULE_GET) ?
2493 DRV_MSG_CODE_AFEX_LISTGET_ACK : 2494 DRV_MSG_CODE_AFEX_LISTSET_ACK; 2495 2496 /* if ramrod can not be sent, respond to MCP immediately for 2497 * SET and GET requests (other are not triggered from MCP) 2498 */ 2499 rc = bnx2x_func_state_change(bp, &func_params); 2500 if (rc < 0) 2501 bnx2x_fw_command(bp, drv_msg_code, 0); 2502 2503 return 0; 2504 } 2505 2506 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) 2507 { 2508 struct afex_stats afex_stats; 2509 u32 func = BP_ABS_FUNC(bp); 2510 u32 mf_config; 2511 u16 vlan_val; 2512 u32 vlan_prio; 2513 u16 vif_id; 2514 u8 allowed_prio; 2515 u8 vlan_mode; 2516 u32 addr_to_write, vifid, addrs, stats_type, i; 2517 2518 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { 2519 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2520 DP(BNX2X_MSG_MCP, 2521 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); 2522 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); 2523 } 2524 2525 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { 2526 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2527 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); 2528 DP(BNX2X_MSG_MCP, 2529 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", 2530 vifid, addrs); 2531 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, 2532 addrs); 2533 } 2534 2535 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { 2536 addr_to_write = SHMEM2_RD(bp, 2537 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); 2538 stats_type = SHMEM2_RD(bp, 2539 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2540 2541 DP(BNX2X_MSG_MCP, 2542 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", 2543 addr_to_write); 2544 2545 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); 2546 2547 /* write response to scratchpad, for MCP */ 2548 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) 2549 REG_WR(bp, addr_to_write + i*sizeof(u32), 2550 *(((u32 *)(&afex_stats))+i)); 2551 2552 /* send ack message to MCP */ 2553 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); 2554 } 2555 2556 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { 2557 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); 2558 bp->mf_config[BP_VN(bp)] = mf_config; 2559 DP(BNX2X_MSG_MCP, 2560 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", 2561 mf_config); 2562 2563 /* if VIF_SET is "enabled" */ 2564 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { 2565 /* set rate limit directly to internal RAM */ 2566 struct cmng_init_input cmng_input; 2567 struct rate_shaping_vars_per_vn m_rs_vn; 2568 size_t size = sizeof(struct rate_shaping_vars_per_vn); 2569 u32 addr = BAR_XSTRORM_INTMEM + 2570 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); 2571 2572 bp->mf_config[BP_VN(bp)] = mf_config; 2573 2574 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); 2575 m_rs_vn.vn_counter.rate = 2576 cmng_input.vnic_max_rate[BP_VN(bp)]; 2577 m_rs_vn.vn_counter.quota = 2578 (m_rs_vn.vn_counter.rate * 2579 RS_PERIODIC_TIMEOUT_USEC) / 8; 2580 2581 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); 2582 2583 /* read relevant values from mf_cfg struct in shmem */ 2584 vif_id = 2585 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2586 FUNC_MF_CFG_E1HOV_TAG_MASK) >> 2587 FUNC_MF_CFG_E1HOV_TAG_SHIFT; 2588 vlan_val = 2589 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2590 FUNC_MF_CFG_AFEX_VLAN_MASK) >> 2591 FUNC_MF_CFG_AFEX_VLAN_SHIFT; 2592 vlan_prio = (mf_config & 2593 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 2594 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; 2595 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); 2596 
vlan_mode =
2597 				(MF_CFG_RD(bp,
2598 					   func_mf_config[func].afex_config) &
2599 				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2600 				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2601 			allowed_prio =
2602 				(MF_CFG_RD(bp,
2603 					   func_mf_config[func].afex_config) &
2604 				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2605 				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2606 
2607 			/* send ramrod to FW, return in case of failure */
2608 			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2609 						   allowed_prio))
2610 				return;
2611 
2612 			bp->afex_def_vlan_tag = vlan_val;
2613 			bp->afex_vlan_mode = vlan_mode;
2614 		} else {
2615 			/* notify link down because BP->flags is disabled */
2616 			bnx2x_link_report(bp);
2617 
2618 			/* send INVALID VIF ramrod to FW */
2619 			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2620 
2621 			/* Reset the default afex VLAN */
2622 			bp->afex_def_vlan_tag = -1;
2623 		}
2624 	}
2625 }
2626 
2627 static void bnx2x_pmf_update(struct bnx2x *bp)
2628 {
2629 	int port = BP_PORT(bp);
2630 	u32 val;
2631 
2632 	bp->port.pmf = 1;
2633 	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2634 
2635 	/*
2636 	 * We need the mb() to ensure the ordering between the writing to
2637 	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2638 	 */
2639 	smp_mb();
2640 
2641 	/* queue a periodic task */
2642 	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2643 
2644 	bnx2x_dcbx_pmf_update(bp);
2645 
2646 	/* enable nig attention */
2647 	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2648 	if (bp->common.int_block == INT_BLOCK_HC) {
2649 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2650 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2651 	} else if (!CHIP_IS_E1x(bp)) {
2652 		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2653 		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2654 	}
2655 
2656 	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2657 }
2658 
2659 /* end of Link */
2660 
2661 /* slow path */
2662 
2663 /*
2664  * General service functions
2665  */
2666 
2667 /* send the MCP a request, block until there is a reply */
2668 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2669 {
2670 	int mb_idx = BP_FW_MB_IDX(bp);
2671 	u32 seq;
2672 	u32 rc = 0;
2673 	u32 cnt = 1;
2674 	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2675 
2676 	mutex_lock(&bp->fw_mb_mutex);
2677 	seq = ++bp->fw_seq;
2678 	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2679 	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2680 
2681 	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2682 	   (command | seq), param);
2683 
2684 	do {
2685 		/* let the FW do its magic ... */
2686 		msleep(delay);
2687 
2688 		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2689 
2690 		/* Give the FW up to 5 seconds (500*10ms) */
2691 	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2692 
2693 	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2694 	   cnt*delay, rc, seq);
2695 
2696 	/* is this a reply to our command? */
2697 	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2698 		rc &= FW_MSG_CODE_MASK;
2699 	else {
2700 		/* FW BUG! */
2701 		BNX2X_ERR("FW failed to respond!\n");
2702 		bnx2x_fw_dump(bp);
2703 		rc = 0;
2704 	}
2705 	mutex_unlock(&bp->fw_mb_mutex);
2706 
2707 	return rc;
2708 }
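/* Editor's illustrative sketch (not part of the original driver): a typical
 * mailbox exchange built on bnx2x_fw_command() above. The command used here
 * is one the driver really sends elsewhere in this file; a zero return
 * means the MCP never echoed our sequence number back.
 */
static inline void bnx2x_example_mcp_simple_ack(struct bnx2x *bp)
{
	/* blocks for up to ~5 seconds (longer on emulation) waiting
	 * for the sequenced reply
	 */
	u32 resp = bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);

	if (!resp)
		BNX2X_ERR("MCP did not respond to SET_MF_BW ack\n");
}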
2709 
2710 
2711 static void storm_memset_func_cfg(struct bnx2x *bp,
2712 				   struct tstorm_eth_function_common_config *tcfg,
2713 				   u16 abs_fid)
2714 {
2715 	size_t size = sizeof(struct tstorm_eth_function_common_config);
2716 
2717 	u32 addr = BAR_TSTRORM_INTMEM +
2718 			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2719 
2720 	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2721 }
2722 
2723 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2724 {
2725 	if (CHIP_IS_E1x(bp)) {
2726 		struct tstorm_eth_function_common_config tcfg = {0};
2727 
2728 		storm_memset_func_cfg(bp, &tcfg, p->func_id);
2729 	}
2730 
2731 	/* Enable the function in the FW */
2732 	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2733 	storm_memset_func_en(bp, p->func_id, 1);
2734 
2735 	/* spq */
2736 	if (p->func_flgs & FUNC_FLG_SPQ) {
2737 		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2738 		REG_WR(bp, XSEM_REG_FAST_MEMORY +
2739 		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2740 	}
2741 }
2742 
2743 /**
2744  * bnx2x_get_common_flags - Return common flags
2745  *
2746  * @bp:		device handle
2747  * @fp:		queue handle
2748  * @zero_stats:	TRUE if statistics zeroing is needed
2749  *
2750  * Return the flags that are common for the Tx-only and not normal connections.
2751  */
2752 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2753 					    struct bnx2x_fastpath *fp,
2754 					    bool zero_stats)
2755 {
2756 	unsigned long flags = 0;
2757 
2758 	/* PF driver will always initialize the Queue to an ACTIVE state */
2759 	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2760 
2761 	/* tx only connections collect statistics (on the same index as the
2762 	 * parent connection). The statistics are zeroed when the parent
2763 	 * connection is initialized.
2764 */ 2765 2766 __set_bit(BNX2X_Q_FLG_STATS, &flags); 2767 if (zero_stats) 2768 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 2769 2770 2771 return flags; 2772 } 2773 2774 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 2775 struct bnx2x_fastpath *fp, 2776 bool leading) 2777 { 2778 unsigned long flags = 0; 2779 2780 /* calculate other queue flags */ 2781 if (IS_MF_SD(bp)) 2782 __set_bit(BNX2X_Q_FLG_OV, &flags); 2783 2784 if (IS_FCOE_FP(fp)) { 2785 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 2786 /* For FCoE - force usage of default priority (for afex) */ 2787 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); 2788 } 2789 2790 if (!fp->disable_tpa) { 2791 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2792 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 2793 if (fp->mode == TPA_MODE_GRO) 2794 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); 2795 } 2796 2797 if (leading) { 2798 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); 2799 __set_bit(BNX2X_Q_FLG_MCAST, &flags); 2800 } 2801 2802 /* Always set HW VLAN stripping */ 2803 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 2804 2805 /* configure silent vlan removal */ 2806 if (IS_MF_AFEX(bp)) 2807 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); 2808 2809 2810 return flags | bnx2x_get_common_flags(bp, fp, true); 2811 } 2812 2813 static void bnx2x_pf_q_prep_general(struct bnx2x *bp, 2814 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, 2815 u8 cos) 2816 { 2817 gen_init->stat_id = bnx2x_stats_id(fp); 2818 gen_init->spcl_id = fp->cl_id; 2819 2820 /* Always use mini-jumbo MTU for FCoE L2 ring */ 2821 if (IS_FCOE_FP(fp)) 2822 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 2823 else 2824 gen_init->mtu = bp->dev->mtu; 2825 2826 gen_init->cos = cos; 2827 } 2828 2829 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, 2830 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, 2831 struct bnx2x_rxq_setup_params *rxq_init) 2832 { 2833 u8 max_sge = 0; 2834 u16 sge_sz = 0; 2835 u16 tpa_agg_size = 0; 2836 2837 if (!fp->disable_tpa) { 2838 pause->sge_th_lo = SGE_TH_LO(bp); 2839 pause->sge_th_hi = SGE_TH_HI(bp); 2840 2841 /* validate SGE ring has enough to cross high threshold */ 2842 WARN_ON(bp->dropless_fc && 2843 pause->sge_th_hi + FW_PREFETCH_CNT > 2844 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 2845 2846 tpa_agg_size = min_t(u32, 2847 (min_t(u32, 8, MAX_SKB_FRAGS) * 2848 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 2849 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> 2850 SGE_PAGE_SHIFT; 2851 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 2852 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; 2853 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE, 2854 0xffff); 2855 } 2856 2857 /* pause - not for e1 */ 2858 if (!CHIP_IS_E1(bp)) { 2859 pause->bd_th_lo = BD_TH_LO(bp); 2860 pause->bd_th_hi = BD_TH_HI(bp); 2861 2862 pause->rcq_th_lo = RCQ_TH_LO(bp); 2863 pause->rcq_th_hi = RCQ_TH_HI(bp); 2864 /* 2865 * validate that rings have enough entries to cross 2866 * high thresholds 2867 */ 2868 WARN_ON(bp->dropless_fc && 2869 pause->bd_th_hi + FW_PREFETCH_CNT > 2870 bp->rx_ring_size); 2871 WARN_ON(bp->dropless_fc && 2872 pause->rcq_th_hi + FW_PREFETCH_CNT > 2873 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 2874 2875 pause->pri_map = 1; 2876 } 2877 2878 /* rxq setup */ 2879 rxq_init->dscr_map = fp->rx_desc_mapping; 2880 rxq_init->sge_map = fp->rx_sge_mapping; 2881 rxq_init->rcq_map = fp->rx_comp_mapping; 2882 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 2883 2884 /* This should be a maximum number of data bytes that may be 2885 * placed on the BD (not including paddings). 
2886 	 */
2887 	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
2888 		BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
2889 
2890 	rxq_init->cl_qzone_id = fp->cl_qzone_id;
2891 	rxq_init->tpa_agg_sz = tpa_agg_size;
2892 	rxq_init->sge_buf_sz = sge_sz;
2893 	rxq_init->max_sges_pkt = max_sge;
2894 	rxq_init->rss_engine_id = BP_FUNC(bp);
2895 	rxq_init->mcast_engine_id = BP_FUNC(bp);
2896 
2897 	/* Maximum number of simultaneous TPA aggregations for this Queue.
2898 	 *
2899 	 * For PF Clients it should be the maximum available number.
2900 	 * VF driver(s) may want to define it to a smaller value.
2901 	 */
2902 	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
2903 
2904 	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2905 	rxq_init->fw_sb_id = fp->fw_sb_id;
2906 
2907 	if (IS_FCOE_FP(fp))
2908 		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2909 	else
2910 		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2911 	/* configure silent vlan removal
2912 	 * if multi function mode is afex, then mask default vlan
2913 	 */
2914 	if (IS_MF_AFEX(bp)) {
2915 		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
2916 		rxq_init->silent_removal_mask = VLAN_VID_MASK;
2917 	}
2918 }
2919 
2920 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
2921 	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
2922 	u8 cos)
2923 {
2924 	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
2925 	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
2926 	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2927 	txq_init->fw_sb_id = fp->fw_sb_id;
2928 
2929 	/*
2930 	 * set the tss leading client id for TX classification ==
2931 	 * leading RSS client id
2932 	 */
2933 	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
2934 
2935 	if (IS_FCOE_FP(fp)) {
2936 		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2937 		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2938 	}
2939 }
2940 
2941 static void bnx2x_pf_init(struct bnx2x *bp)
2942 {
2943 	struct bnx2x_func_init_params func_init = {0};
2944 	struct event_ring_data eq_data = { {0} };
2945 	u16 flags;
2946 
2947 	if (!CHIP_IS_E1x(bp)) {
2948 		/* reset IGU PF statistics: MSIX + ATTN */
2949 		/* PF */
2950 		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2951 			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2952 			   (CHIP_MODE_IS_4_PORT(bp) ?
2953 				BP_FUNC(bp) : BP_VN(bp))*4, 0);
2954 		/* ATTN */
2955 		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2956 			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2957 			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2958 			   (CHIP_MODE_IS_4_PORT(bp) ?
2959 				BP_FUNC(bp) : BP_VN(bp))*4, 0);
2960 	}
2961 
2962 	/* function setup flags */
2963 	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2964 
2965 	/* This flag is relevant for E1x only.
2966 	 * E2 doesn't have a TPA configuration in a function level.
2967 	 */
2968 	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2969 
2970 	func_init.func_flgs = flags;
2971 	func_init.pf_id = BP_FUNC(bp);
2972 	func_init.func_id = BP_FUNC(bp);
2973 	func_init.spq_map = bp->spq_mapping;
2974 	func_init.spq_prod = bp->spq_prod_idx;
2975 
2976 	bnx2x_func_init(bp, &func_init);
2977 
2978 	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2979 
2980 	/*
2981 	 * Congestion management values depend on the link rate.
2982 	 * There is no active link so initial link rate is set to 10 Gbps.
2983 	 * When the link comes up the congestion management values are
2984 	 * re-calculated according to the actual link rate.
2985 */ 2986 bp->link_vars.line_speed = SPEED_10000; 2987 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); 2988 2989 /* Only the PMF sets the HW */ 2990 if (bp->port.pmf) 2991 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 2992 2993 /* init Event Queue */ 2994 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); 2995 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); 2996 eq_data.producer = bp->eq_prod; 2997 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 2998 eq_data.sb_id = DEF_SB_ID; 2999 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); 3000 } 3001 3002 3003 static void bnx2x_e1h_disable(struct bnx2x *bp) 3004 { 3005 int port = BP_PORT(bp); 3006 3007 bnx2x_tx_disable(bp); 3008 3009 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 3010 } 3011 3012 static void bnx2x_e1h_enable(struct bnx2x *bp) 3013 { 3014 int port = BP_PORT(bp); 3015 3016 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 3017 3018 /* Tx queue should be only reenabled */ 3019 netif_tx_wake_all_queues(bp->dev); 3020 3021 /* 3022 * Should not call netif_carrier_on since it will be called if the link 3023 * is up when checking for link state 3024 */ 3025 } 3026 3027 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3028 3029 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) 3030 { 3031 struct eth_stats_info *ether_stat = 3032 &bp->slowpath->drv_info_to_mcp.ether_stat; 3033 3034 /* leave last char as NULL */ 3035 memcpy(ether_stat->version, DRV_MODULE_VERSION, 3036 ETH_STAT_INFO_VERSION_LEN - 1); 3037 3038 bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, 3039 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3040 ether_stat->mac_local); 3041 3042 ether_stat->mtu_size = bp->dev->mtu; 3043 3044 if (bp->dev->features & NETIF_F_RXCSUM) 3045 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3046 if (bp->dev->features & NETIF_F_TSO) 3047 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 3048 ether_stat->feature_flags |= bp->common.boot_mode; 3049 3050 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; 3051 3052 ether_stat->txq_size = bp->tx_ring_size; 3053 ether_stat->rxq_size = bp->rx_ring_size; 3054 } 3055 3056 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3057 { 3058 #ifdef BCM_CNIC 3059 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3060 struct fcoe_stats_info *fcoe_stat = 3061 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3062 3063 memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN); 3064 3065 fcoe_stat->qos_priority = 3066 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3067 3068 /* insert FCoE stats from ramrod response */ 3069 if (!NO_FCOE(bp)) { 3070 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3071 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3072 tstorm_queue_statistics; 3073 3074 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3075 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 
3076 				xstorm_queue_statistics;
3077 
3078 		struct fcoe_statistics_params *fw_fcoe_stat =
3079 			&bp->fw_stats_data->fcoe;
3080 
3081 		ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
3082 		       fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3083 
3084 		ADD_64(fcoe_stat->rx_bytes_hi,
3085 		       fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3086 		       fcoe_stat->rx_bytes_lo,
3087 		       fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3088 
3089 		ADD_64(fcoe_stat->rx_bytes_hi,
3090 		       fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3091 		       fcoe_stat->rx_bytes_lo,
3092 		       fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3093 
3094 		ADD_64(fcoe_stat->rx_bytes_hi,
3095 		       fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3096 		       fcoe_stat->rx_bytes_lo,
3097 		       fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3098 
3099 		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3100 		       fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3101 
3102 		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3103 		       fcoe_q_tstorm_stats->rcv_ucast_pkts);
3104 
3105 		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3106 		       fcoe_q_tstorm_stats->rcv_bcast_pkts);
3107 
3108 		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3109 		       fcoe_q_tstorm_stats->rcv_mcast_pkts);
3110 
3111 		ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
3112 		       fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3113 
3114 		ADD_64(fcoe_stat->tx_bytes_hi,
3115 		       fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3116 		       fcoe_stat->tx_bytes_lo,
3117 		       fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3118 
3119 		ADD_64(fcoe_stat->tx_bytes_hi,
3120 		       fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3121 		       fcoe_stat->tx_bytes_lo,
3122 		       fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3123 
3124 		ADD_64(fcoe_stat->tx_bytes_hi,
3125 		       fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3126 		       fcoe_stat->tx_bytes_lo,
3127 		       fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3128 
3129 		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3130 		       fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3131 
3132 		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3133 		       fcoe_q_xstorm_stats->ucast_pkts_sent);
3134 
3135 		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3136 		       fcoe_q_xstorm_stats->bcast_pkts_sent);
3137 
3138 		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3139 		       fcoe_q_xstorm_stats->mcast_pkts_sent);
3140 	}
3141 
3142 	/* ask L5 driver to add data to the struct */
3143 	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3144 #endif
3145 }
3146 
3147 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3148 {
3149 #ifdef BCM_CNIC
3150 	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3151 	struct iscsi_stats_info *iscsi_stat =
3152 		&bp->slowpath->drv_info_to_mcp.iscsi_stat;
3153 
3154 	memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3155 
3156 	iscsi_stat->qos_priority =
3157 		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3158 
3159 	/* ask L5 driver to add data to the struct */
3160 	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3161 #endif
3162 }
3163 
3164 /* called due to MCP event (on pmf):
3165  *	reread new bandwidth configuration
3166  *	configure FW
3167  *	notify other functions about the change
3168  */
3169 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3170 {
3171 	if (bp->link_vars.link_up) {
3172 		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3173 		bnx2x_link_sync_notify(bp);
3174 	}
3175 	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3176 }
3177 
3178 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3179 {
3180 	bnx2x_config_mf_bw(bp);
3181 	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3182 }
3183 
3184 static void
bnx2x_handle_eee_event(struct bnx2x *bp) 3185 { 3186 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); 3187 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3188 } 3189 3190 static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3191 { 3192 enum drv_info_opcode op_code; 3193 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3194 3195 /* if drv_info version supported by MFW doesn't match - send NACK */ 3196 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3197 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3198 return; 3199 } 3200 3201 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3202 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3203 3204 memset(&bp->slowpath->drv_info_to_mcp, 0, 3205 sizeof(union drv_info_to_mcp)); 3206 3207 switch (op_code) { 3208 case ETH_STATS_OPCODE: 3209 bnx2x_drv_info_ether_stat(bp); 3210 break; 3211 case FCOE_STATS_OPCODE: 3212 bnx2x_drv_info_fcoe_stat(bp); 3213 break; 3214 case ISCSI_STATS_OPCODE: 3215 bnx2x_drv_info_iscsi_stat(bp); 3216 break; 3217 default: 3218 /* if op code isn't supported - send NACK */ 3219 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3220 return; 3221 } 3222 3223 /* if we got drv_info attn from MFW then these fields are defined in 3224 * shmem2 for sure 3225 */ 3226 SHMEM2_WR(bp, drv_info_host_addr_lo, 3227 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3228 SHMEM2_WR(bp, drv_info_host_addr_hi, 3229 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3230 3231 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3232 } 3233 3234 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 3235 { 3236 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 3237 3238 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3239 3240 /* 3241 * This is the only place besides the function initialization 3242 * where the bp->flags can change so it is done without any 3243 * locks 3244 */ 3245 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3246 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3247 bp->flags |= MF_FUNC_DIS; 3248 3249 bnx2x_e1h_disable(bp); 3250 } else { 3251 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3252 bp->flags &= ~MF_FUNC_DIS; 3253 3254 bnx2x_e1h_enable(bp); 3255 } 3256 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3257 } 3258 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3259 bnx2x_config_mf_bw(bp); 3260 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3261 } 3262 3263 /* Report results to MCP */ 3264 if (dcc_event) 3265 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); 3266 else 3267 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); 3268 } 3269 3270 /* must be called under the spq lock */ 3271 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3272 { 3273 struct eth_spe *next_spe = bp->spq_prod_bd; 3274 3275 if (bp->spq_prod_bd == bp->spq_last_bd) { 3276 bp->spq_prod_bd = bp->spq; 3277 bp->spq_prod_idx = 0; 3278 DP(BNX2X_MSG_SP, "end of spq\n"); 3279 } else { 3280 bp->spq_prod_bd++; 3281 bp->spq_prod_idx++; 3282 } 3283 return next_spe; 3284 } 3285 3286 /* must be called under the spq lock */ 3287 static void bnx2x_sp_prod_update(struct bnx2x *bp) 3288 { 3289 int func = BP_FUNC(bp); 3290 3291 /* 3292 * Make sure that BD data is updated before writing the producer: 3293 * BD data is written to the memory, the producer is read from the 3294 * memory, thus we need a full memory barrier to ensure the ordering. 
 */
3296 	mb();
3297 
3298 	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3299 		 bp->spq_prod_idx);
3300 	mmiowb();
3301 }
3302 
3303 /**
3304  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3305  *
3306  * @cmd:	command to check
3307  * @cmd_type:	command type
3308  */
3309 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3310 {
3311 	if ((cmd_type == NONE_CONNECTION_TYPE) ||
3312 	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3313 	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3314 	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3315 	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3316 	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3317 	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3318 		return true;
3319 	else
3320 		return false;
3321 
3322 }
3323 
3324 
3325 /**
3326  * bnx2x_sp_post - place a single command on an SP ring
3327  *
3328  * @bp:		driver handle
3329  * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
3330  * @cid:	SW CID the command is related to
3331  * @data_hi:	command private data address (high 32 bits)
3332  * @data_lo:	command private data address (low 32 bits)
3333  * @cmd_type:	command type (e.g. NONE, ETH)
3334  *
3335  * SP data is handled as if it's always an address pair, thus data fields are
3336  * not swapped to little endian in upper functions. Instead this function swaps
3337  * data as if it's two u32 fields.
3338  */
3339 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3340 		  u32 data_hi, u32 data_lo, int cmd_type)
3341 {
3342 	struct eth_spe *spe;
3343 	u16 type;
3344 	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3345 
3346 #ifdef BNX2X_STOP_ON_ERROR
3347 	if (unlikely(bp->panic)) {
3348 		BNX2X_ERR("Can't post SP when there is panic\n");
3349 		return -EIO;
3350 	}
3351 #endif
3352 
3353 	spin_lock_bh(&bp->spq_lock);
3354 
3355 	if (common) {
3356 		if (!atomic_read(&bp->eq_spq_left)) {
3357 			BNX2X_ERR("BUG! EQ ring full!\n");
3358 			spin_unlock_bh(&bp->spq_lock);
3359 			bnx2x_panic();
3360 			return -EBUSY;
3361 		}
3362 	} else if (!atomic_read(&bp->cq_spq_left)) {
3363 		BNX2X_ERR("BUG! SPQ ring full!\n");
3364 		spin_unlock_bh(&bp->spq_lock);
3365 		bnx2x_panic();
3366 		return -EBUSY;
3367 	}
3368 
3369 	spe = bnx2x_sp_get_next(bp);
3370 
3371 	/* CID needs the port number to be encoded in it */
3372 	spe->hdr.conn_and_cmd_data =
3373 			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3374 				    HW_CID(bp, cid));
3375 
3376 	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3377 
3378 	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3379 		 SPE_HDR_FUNCTION_ID);
3380 
3381 	spe->hdr.type = cpu_to_le16(type);
3382 
3383 	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3384 	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3385 
3386 	/*
3387 	 * It's ok if the actual decrement is issued towards the memory
3388 	 * somewhere between the spin_lock and spin_unlock. Thus no
3389 	 * more explicit memory barrier is needed.
3390 	 */
3391 	if (common)
3392 		atomic_dec(&bp->eq_spq_left);
3393 	else
3394 		atomic_dec(&bp->cq_spq_left);
3395 
3396 
3397 	DP(BNX2X_MSG_SP,
3398 	   "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3399 	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3400 	   (u32)(U64_LO(bp->spq_mapping) +
3401 	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3402 	   HW_CID(bp, cid), data_hi, data_lo, type,
3403 	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3404 
3405 	bnx2x_sp_prod_update(bp);
3406 	spin_unlock_bh(&bp->spq_lock);
3407 	return 0;
3408 }
3409 
3410 /* acquire split MCP access lock register */
3411 static int bnx2x_acquire_alr(struct bnx2x *bp)
3412 {
3413 	u32 j, val;
3414 	int rc = 0;
3415 
3416 	might_sleep();
3417 	for (j = 0; j < 1000; j++) {
3418 		val = (1UL << 31);
3419 		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3420 		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3421 		if (val & (1L << 31))
3422 			break;
3423 
3424 		msleep(5);
3425 	}
3426 	if (!(val & (1L << 31))) {
3427 		BNX2X_ERR("Cannot acquire MCP access lock register\n");
3428 		rc = -EBUSY;
3429 	}
3430 
3431 	return rc;
3432 }
3433 
3434 /* release split MCP access lock register */
3435 static void bnx2x_release_alr(struct bnx2x *bp)
3436 {
3437 	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
3438 }
3439 
3440 #define BNX2X_DEF_SB_ATT_IDX	0x0001
3441 #define BNX2X_DEF_SB_IDX	0x0002
3442 
3443 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3444 {
3445 	struct host_sp_status_block *def_sb = bp->def_status_blk;
3446 	u16 rc = 0;
3447 
3448 	barrier(); /* status block is written to by the chip */
3449 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3450 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3451 		rc |= BNX2X_DEF_SB_ATT_IDX;
3452 	}
3453 
3454 	if (bp->def_idx != def_sb->sp_sb.running_index) {
3455 		bp->def_idx = def_sb->sp_sb.running_index;
3456 		rc |= BNX2X_DEF_SB_IDX;
3457 	}
3458 
3459 	/* Do not reorder: reading the indices should complete before handling */
3460 	barrier();
3461 	return rc;
3462 }
3463 
3464 /*
3465  * slow path service functions
3466  */
3467 
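/* Editor's illustrative sketch (not part of the original driver): how the
 * status returned by bnx2x_update_dsb_idx() above is typically consumed -
 * attention bits first, then regular slow-path events. The real consumer
 * is the driver's sp task; the empty branches are placeholders.
 */
static inline void bnx2x_example_consume_dsb_status(struct bnx2x *bp)
{
	u16 status = bnx2x_update_dsb_idx(bp);

	if (status & BNX2X_DEF_SB_ATT_IDX) {
		/* HW attention: asserted/deasserted bits changed */
	}

	if (status & BNX2X_DEF_SB_IDX) {
		/* slow-path ring update: EQ/CQ completions to drain */
	}
}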
3468 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3469 {
3470 	int port = BP_PORT(bp);
3471 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3472 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
3473 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3474 				       NIG_REG_MASK_INTERRUPT_PORT0;
3475 	u32 aeu_mask;
3476 	u32 nig_mask = 0;
3477 	u32 reg_addr;
3478 
3479 	if (bp->attn_state & asserted)
3480 		BNX2X_ERR("IGU ERROR\n");
3481 
3482 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3483 	aeu_mask = REG_RD(bp, aeu_addr);
3484 
3485 	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3486 	   aeu_mask, asserted);
3487 	aeu_mask &= ~(asserted & 0x3ff);
3488 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3489 
3490 	REG_WR(bp, aeu_addr, aeu_mask);
3491 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3492 
3493 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3494 	bp->attn_state |= asserted;
3495 	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3496 
3497 	if (asserted & ATTN_HARD_WIRED_MASK) {
3498 		if (asserted & ATTN_NIG_FOR_FUNC) {
3499 
3500 			bnx2x_acquire_phy_lock(bp);
3501 
3502 			/* save nig interrupt mask */
3503 			nig_mask = REG_RD(bp, nig_int_mask_addr);
3504 
3505 			/* If nig_mask is not set, no need to call the update
3506 			 * function.
3507 			 */
3508 			if (nig_mask) {
3509 				REG_WR(bp, nig_int_mask_addr, 0);
3510 
3511 				bnx2x_link_attn(bp);
3512 			}
3513 
3514 			/* handle unicore attn? */
3515 		}
3516 		if (asserted & ATTN_SW_TIMER_4_FUNC)
3517 			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3518 
3519 		if (asserted & GPIO_2_FUNC)
3520 			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3521 
3522 		if (asserted & GPIO_3_FUNC)
3523 			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3524 
3525 		if (asserted & GPIO_4_FUNC)
3526 			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3527 
3528 		if (port == 0) {
3529 			if (asserted & ATTN_GENERAL_ATTN_1) {
3530 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3531 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3532 			}
3533 			if (asserted & ATTN_GENERAL_ATTN_2) {
3534 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3535 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3536 			}
3537 			if (asserted & ATTN_GENERAL_ATTN_3) {
3538 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3539 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3540 			}
3541 		} else {
3542 			if (asserted & ATTN_GENERAL_ATTN_4) {
3543 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3544 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3545 			}
3546 			if (asserted & ATTN_GENERAL_ATTN_5) {
3547 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3548 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3549 			}
3550 			if (asserted & ATTN_GENERAL_ATTN_6) {
3551 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3552 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3553 			}
3554 		}
3555 
3556 	} /* if hardwired */
3557 
3558 	if (bp->common.int_block == INT_BLOCK_HC)
3559 		reg_addr = (HC_REG_COMMAND_REG + port*32 +
3560 			    COMMAND_REG_ATTN_BITS_SET);
3561 	else
3562 		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3563 
3564 	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3565 	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3566 	REG_WR(bp, reg_addr, asserted);
3567 
3568 	/* now set back the mask */
3569 	if (asserted & ATTN_NIG_FOR_FUNC) {
3570 		REG_WR(bp, nig_int_mask_addr, nig_mask);
3571 		bnx2x_release_phy_lock(bp);
3572 	}
3573 }
3574 
3575 static void bnx2x_fan_failure(struct bnx2x *bp)
3576 {
3577 	int port = BP_PORT(bp);
3578 	u32 ext_phy_config;
3579 	/* mark the failure */
3580 	ext_phy_config =
3581 		SHMEM_RD(bp,
3582 			 dev_info.port_hw_config[port].external_phy_config);
3583 
3584 	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3585 	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3586 	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3587 		 ext_phy_config);
3588 
3589 	/* log the failure */
3590 	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3591 			    "Please contact OEM Support for assistance\n");
3592 
3593 	/*
3594 	 * Schedule device reset (unload).
3595 	 * Some boards consume enough power while the driver is up to
3596 	 * overheat if the fan fails.
3597 	 */
3598 	smp_mb__before_clear_bit();
3599 	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3600 	smp_mb__after_clear_bit();
3601 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
3602 
3603 }
3604 
3605 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3606 {
3607 	int port = BP_PORT(bp);
3608 	int reg_offset;
3609 	u32 val;
3610 
3611 	reg_offset = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 3612 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 3613 3614 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 3615 3616 val = REG_RD(bp, reg_offset); 3617 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 3618 REG_WR(bp, reg_offset, val); 3619 3620 BNX2X_ERR("SPIO5 hw attention\n"); 3621 3622 /* Fan failure attention */ 3623 bnx2x_hw_reset_phy(&bp->link_params); 3624 bnx2x_fan_failure(bp); 3625 } 3626 3627 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 3628 bnx2x_acquire_phy_lock(bp); 3629 bnx2x_handle_module_detect_int(&bp->link_params); 3630 bnx2x_release_phy_lock(bp); 3631 } 3632 3633 if (attn & HW_INTERRUT_ASSERT_SET_0) { 3634 3635 val = REG_RD(bp, reg_offset); 3636 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 3637 REG_WR(bp, reg_offset, val); 3638 3639 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 3640 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 3641 bnx2x_panic(); 3642 } 3643 } 3644 3645 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 3646 { 3647 u32 val; 3648 3649 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 3650 3651 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 3652 BNX2X_ERR("DB hw attention 0x%x\n", val); 3653 /* DORQ discard attention */ 3654 if (val & 0x2) 3655 BNX2X_ERR("FATAL error from DORQ\n"); 3656 } 3657 3658 if (attn & HW_INTERRUT_ASSERT_SET_1) { 3659 3660 int port = BP_PORT(bp); 3661 int reg_offset; 3662 3663 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 3664 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 3665 3666 val = REG_RD(bp, reg_offset); 3667 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 3668 REG_WR(bp, reg_offset, val); 3669 3670 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 3671 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 3672 bnx2x_panic(); 3673 } 3674 } 3675 3676 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 3677 { 3678 u32 val; 3679 3680 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3681 3682 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 3683 BNX2X_ERR("CFC hw attention 0x%x\n", val); 3684 /* CFC error attention */ 3685 if (val & 0x2) 3686 BNX2X_ERR("FATAL error from CFC\n"); 3687 } 3688 3689 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3690 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 3691 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 3692 /* RQ_USDMDP_FIFO_OVERFLOW */ 3693 if (val & 0x18000) 3694 BNX2X_ERR("FATAL error from PXP\n"); 3695 3696 if (!CHIP_IS_E1x(bp)) { 3697 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 3698 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 3699 } 3700 } 3701 3702 if (attn & HW_INTERRUT_ASSERT_SET_2) { 3703 3704 int port = BP_PORT(bp); 3705 int reg_offset; 3706 3707 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 3708 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 3709 3710 val = REG_RD(bp, reg_offset); 3711 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 3712 REG_WR(bp, reg_offset, val); 3713 3714 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 3715 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 3716 bnx2x_panic(); 3717 } 3718 } 3719 3720 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 3721 { 3722 u32 val; 3723 3724 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 3725 3726 if (attn & BNX2X_PMF_LINK_ASSERT) { 3727 int func = BP_FUNC(bp); 3728 3729 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3730 bnx2x_read_mf_cfg(bp); 3731 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 3732 func_mf_config[BP_ABS_FUNC(bp)].config); 3733 val = SHMEM_RD(bp, 3734 func_mb[BP_FW_MB_IDX(bp)].drv_status); 3735 if (val & DRV_STATUS_DCC_EVENT_MASK) 3736 bnx2x_dcc_event(bp, 3737 (val & DRV_STATUS_DCC_EVENT_MASK)); 3738 3739 if (val & DRV_STATUS_SET_MF_BW) 3740 bnx2x_set_mf_bw(bp); 3741 3742 if (val & DRV_STATUS_DRV_INFO_REQ) 3743 bnx2x_handle_drv_info_req(bp); 3744 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3745 bnx2x_pmf_update(bp); 3746 3747 if (bp->port.pmf && 3748 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 3749 bp->dcbx_enabled > 0) 3750 /* start dcbx state machine */ 3751 bnx2x_dcbx_set_params(bp, 3752 BNX2X_DCBX_STATE_NEG_RECEIVED); 3753 if (val & DRV_STATUS_AFEX_EVENT_MASK) 3754 bnx2x_handle_afex_cmd(bp, 3755 val & DRV_STATUS_AFEX_EVENT_MASK); 3756 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 3757 bnx2x_handle_eee_event(bp); 3758 if (bp->link_vars.periodic_flags & 3759 PERIODIC_FLAGS_LINK_EVENT) { 3760 /* sync with link */ 3761 bnx2x_acquire_phy_lock(bp); 3762 bp->link_vars.periodic_flags &= 3763 ~PERIODIC_FLAGS_LINK_EVENT; 3764 bnx2x_release_phy_lock(bp); 3765 if (IS_MF(bp)) 3766 bnx2x_link_sync_notify(bp); 3767 bnx2x_link_report(bp); 3768 } 3769 /* Always call it here: bnx2x_link_report() will 3770 * prevent the link indication duplication. 3771 */ 3772 bnx2x__link_status_update(bp); 3773 } else if (attn & BNX2X_MC_ASSERT_BITS) { 3774 3775 BNX2X_ERR("MC assert!\n"); 3776 bnx2x_mc_assert(bp); 3777 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); 3778 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); 3779 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); 3780 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); 3781 bnx2x_panic(); 3782 3783 } else if (attn & BNX2X_MCP_ASSERT) { 3784 3785 BNX2X_ERR("MCP assert!\n"); 3786 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); 3787 bnx2x_fw_dump(bp); 3788 3789 } else 3790 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); 3791 } 3792 3793 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3794 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 3795 if (attn & BNX2X_GRC_TIMEOUT) { 3796 val = CHIP_IS_E1(bp) ? 0 : 3797 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); 3798 BNX2X_ERR("GRC time-out 0x%08x\n", val); 3799 } 3800 if (attn & BNX2X_GRC_RSV) { 3801 val = CHIP_IS_E1(bp) ? 0 : 3802 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); 3803 BNX2X_ERR("GRC reserved 0x%08x\n", val); 3804 } 3805 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3806 } 3807 } 3808 3809 /* 3810 * Bits map: 3811 * 0-7 - Engine0 load counter. 3812 * 8-15 - Engine1 load counter. 3813 * 16 - Engine0 RESET_IN_PROGRESS bit. 3814 * 17 - Engine1 RESET_IN_PROGRESS bit. 3815 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function 3816 * on the engine 3817 * 19 - Engine1 ONE_IS_LOADED. 3818 * 20 - Chip reset flow bit. 
When set, a non-leader must wait for the leader on both
 *	engines to complete (i.e. check both RESET_IN_PROGRESS bits, not
 *	just the one belonging to its own engine).
 */
#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1

#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
#define BNX2X_GLOBAL_RESET_BIT		0x00040000

/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
void bnx2x_set_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_clear_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
static bool bnx2x_reset_is_global(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
}

/*
 * Clear RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Clear the bit */
	val &= ~bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Set RESET_IN_PROGRESS for the current engine.
 *
 * Should be run under rtnl lock
 */
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Set the bit */
	val |= bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the RESET_IN_PROGRESS bit for the given engine.
 * Should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	u32 bit = engine ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;

	/* return false if bit is set */
	return (val & bit) ? false : true;
}

/*
 * Set pf load for the current pf.
 *
 * Should be run under rtnl lock
 */
void bnx2x_set_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ?
BNX2X_PATH1_LOAD_CNT_MASK : 3936 BNX2X_PATH0_LOAD_CNT_MASK; 3937 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3938 BNX2X_PATH0_LOAD_CNT_SHIFT; 3939 3940 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3941 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3942 3943 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 3944 3945 /* get the current counter value */ 3946 val1 = (val & mask) >> shift; 3947 3948 /* set bit of that PF */ 3949 val1 |= (1 << bp->pf_num); 3950 3951 /* clear the old value */ 3952 val &= ~mask; 3953 3954 /* set the new one */ 3955 val |= ((val1 << shift) & mask); 3956 3957 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3959 } 3960 3961 /** 3962 * bnx2x_clear_pf_load - clear pf load mark 3963 * 3964 * @bp: driver handle 3965 * 3966 * Should be run under rtnl lock. 3967 * Decrements the load counter for the current engine. Returns 3968 * whether other functions are still loaded 3969 */ 3970 bool bnx2x_clear_pf_load(struct bnx2x *bp) 3971 { 3972 u32 val1, val; 3973 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 3974 BNX2X_PATH0_LOAD_CNT_MASK; 3975 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3976 BNX2X_PATH0_LOAD_CNT_SHIFT; 3977 3978 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3979 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3980 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 3981 3982 /* get the current counter value */ 3983 val1 = (val & mask) >> shift; 3984 3985 /* clear bit of that PF */ 3986 val1 &= ~(1 << bp->pf_num); 3987 3988 /* clear the old value */ 3989 val &= ~mask; 3990 3991 /* set the new one */ 3992 val |= ((val1 << shift) & mask); 3993 3994 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3995 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3996 return val1 != 0; 3997 } 3998 3999 /* 4000 * Read the load status for the current engine. 4001 * 4002 * should be run under rtnl lock 4003 */ 4004 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 4005 { 4006 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 4007 BNX2X_PATH0_LOAD_CNT_MASK); 4008 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4009 BNX2X_PATH0_LOAD_CNT_SHIFT); 4010 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4011 4012 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4013 4014 val = (val & mask) >> shift; 4015 4016 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4017 engine, val); 4018 4019 return val != 0; 4020 } 4021 4022 /* 4023 * Reset the load status for the current engine. 4024 */ 4025 static void bnx2x_clear_load_status(struct bnx2x *bp) 4026 { 4027 u32 val; 4028 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4029 BNX2X_PATH0_LOAD_CNT_MASK); 4030 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4031 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4032 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); 4033 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4034 } 4035 4036 static void _print_next_block(int idx, const char *blk) 4037 { 4038 pr_cont("%s%s", idx ? 
", " : "", blk); 4039 } 4040 4041 static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, 4042 bool print) 4043 { 4044 int i = 0; 4045 u32 cur_bit = 0; 4046 for (i = 0; sig; i++) { 4047 cur_bit = ((u32)0x1 << i); 4048 if (sig & cur_bit) { 4049 switch (cur_bit) { 4050 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4051 if (print) 4052 _print_next_block(par_num++, "BRB"); 4053 break; 4054 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4055 if (print) 4056 _print_next_block(par_num++, "PARSER"); 4057 break; 4058 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4059 if (print) 4060 _print_next_block(par_num++, "TSDM"); 4061 break; 4062 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4063 if (print) 4064 _print_next_block(par_num++, 4065 "SEARCHER"); 4066 break; 4067 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4068 if (print) 4069 _print_next_block(par_num++, "TCM"); 4070 break; 4071 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4072 if (print) 4073 _print_next_block(par_num++, "TSEMI"); 4074 break; 4075 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4076 if (print) 4077 _print_next_block(par_num++, "XPB"); 4078 break; 4079 } 4080 4081 /* Clear the bit */ 4082 sig &= ~cur_bit; 4083 } 4084 } 4085 4086 return par_num; 4087 } 4088 4089 static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, 4090 bool *global, bool print) 4091 { 4092 int i = 0; 4093 u32 cur_bit = 0; 4094 for (i = 0; sig; i++) { 4095 cur_bit = ((u32)0x1 << i); 4096 if (sig & cur_bit) { 4097 switch (cur_bit) { 4098 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4099 if (print) 4100 _print_next_block(par_num++, "PBF"); 4101 break; 4102 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4103 if (print) 4104 _print_next_block(par_num++, "QM"); 4105 break; 4106 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4107 if (print) 4108 _print_next_block(par_num++, "TM"); 4109 break; 4110 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4111 if (print) 4112 _print_next_block(par_num++, "XSDM"); 4113 break; 4114 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4115 if (print) 4116 _print_next_block(par_num++, "XCM"); 4117 break; 4118 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4119 if (print) 4120 _print_next_block(par_num++, "XSEMI"); 4121 break; 4122 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4123 if (print) 4124 _print_next_block(par_num++, 4125 "DOORBELLQ"); 4126 break; 4127 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4128 if (print) 4129 _print_next_block(par_num++, "NIG"); 4130 break; 4131 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4132 if (print) 4133 _print_next_block(par_num++, 4134 "VAUX PCI CORE"); 4135 *global = true; 4136 break; 4137 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4138 if (print) 4139 _print_next_block(par_num++, "DEBUG"); 4140 break; 4141 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4142 if (print) 4143 _print_next_block(par_num++, "USDM"); 4144 break; 4145 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4146 if (print) 4147 _print_next_block(par_num++, "UCM"); 4148 break; 4149 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4150 if (print) 4151 _print_next_block(par_num++, "USEMI"); 4152 break; 4153 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4154 if (print) 4155 _print_next_block(par_num++, "UPB"); 4156 break; 4157 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4158 if (print) 4159 _print_next_block(par_num++, "CSDM"); 4160 break; 4161 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4162 if (print) 4163 _print_next_block(par_num++, "CCM"); 4164 break; 4165 } 4166 4167 /* Clear the bit */ 4168 sig &= ~cur_bit; 4169 
} 4170 } 4171 4172 return par_num; 4173 } 4174 4175 static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, 4176 bool print) 4177 { 4178 int i = 0; 4179 u32 cur_bit = 0; 4180 for (i = 0; sig; i++) { 4181 cur_bit = ((u32)0x1 << i); 4182 if (sig & cur_bit) { 4183 switch (cur_bit) { 4184 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4185 if (print) 4186 _print_next_block(par_num++, "CSEMI"); 4187 break; 4188 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4189 if (print) 4190 _print_next_block(par_num++, "PXP"); 4191 break; 4192 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4193 if (print) 4194 _print_next_block(par_num++, 4195 "PXPPCICLOCKCLIENT"); 4196 break; 4197 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4198 if (print) 4199 _print_next_block(par_num++, "CFC"); 4200 break; 4201 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4202 if (print) 4203 _print_next_block(par_num++, "CDU"); 4204 break; 4205 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4206 if (print) 4207 _print_next_block(par_num++, "DMAE"); 4208 break; 4209 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4210 if (print) 4211 _print_next_block(par_num++, "IGU"); 4212 break; 4213 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4214 if (print) 4215 _print_next_block(par_num++, "MISC"); 4216 break; 4217 } 4218 4219 /* Clear the bit */ 4220 sig &= ~cur_bit; 4221 } 4222 } 4223 4224 return par_num; 4225 } 4226 4227 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4228 bool *global, bool print) 4229 { 4230 int i = 0; 4231 u32 cur_bit = 0; 4232 for (i = 0; sig; i++) { 4233 cur_bit = ((u32)0x1 << i); 4234 if (sig & cur_bit) { 4235 switch (cur_bit) { 4236 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4237 if (print) 4238 _print_next_block(par_num++, "MCP ROM"); 4239 *global = true; 4240 break; 4241 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4242 if (print) 4243 _print_next_block(par_num++, 4244 "MCP UMP RX"); 4245 *global = true; 4246 break; 4247 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4248 if (print) 4249 _print_next_block(par_num++, 4250 "MCP UMP TX"); 4251 *global = true; 4252 break; 4253 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4254 if (print) 4255 _print_next_block(par_num++, 4256 "MCP SCPAD"); 4257 *global = true; 4258 break; 4259 } 4260 4261 /* Clear the bit */ 4262 sig &= ~cur_bit; 4263 } 4264 } 4265 4266 return par_num; 4267 } 4268 4269 static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, 4270 bool print) 4271 { 4272 int i = 0; 4273 u32 cur_bit = 0; 4274 for (i = 0; sig; i++) { 4275 cur_bit = ((u32)0x1 << i); 4276 if (sig & cur_bit) { 4277 switch (cur_bit) { 4278 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4279 if (print) 4280 _print_next_block(par_num++, "PGLUE_B"); 4281 break; 4282 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4283 if (print) 4284 _print_next_block(par_num++, "ATC"); 4285 break; 4286 } 4287 4288 /* Clear the bit */ 4289 sig &= ~cur_bit; 4290 } 4291 } 4292 4293 return par_num; 4294 } 4295 4296 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4297 u32 *sig) 4298 { 4299 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4300 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4301 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4302 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4303 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4304 int par_num = 0; 4305 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4306 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4307 sig[0] & HW_PRTY_ASSERT_SET_0, 4308 sig[1] & HW_PRTY_ASSERT_SET_1, 4309 sig[2] & 
HW_PRTY_ASSERT_SET_2, 4310 sig[3] & HW_PRTY_ASSERT_SET_3, 4311 sig[4] & HW_PRTY_ASSERT_SET_4); 4312 if (print) 4313 netdev_err(bp->dev, 4314 "Parity errors detected in blocks: "); 4315 par_num = bnx2x_check_blocks_with_parity0( 4316 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4317 par_num = bnx2x_check_blocks_with_parity1( 4318 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4319 par_num = bnx2x_check_blocks_with_parity2( 4320 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4321 par_num = bnx2x_check_blocks_with_parity3( 4322 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4323 par_num = bnx2x_check_blocks_with_parity4( 4324 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4325 4326 if (print) 4327 pr_cont("\n"); 4328 4329 return true; 4330 } else 4331 return false; 4332 } 4333 4334 /** 4335 * bnx2x_chk_parity_attn - checks for parity attentions. 4336 * 4337 * @bp: driver handle 4338 * @global: true if there was a global attention 4339 * @print: show parity attention in syslog 4340 */ 4341 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 4342 { 4343 struct attn_route attn = { {0} }; 4344 int port = BP_PORT(bp); 4345 4346 attn.sig[0] = REG_RD(bp, 4347 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 4348 port*4); 4349 attn.sig[1] = REG_RD(bp, 4350 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 4351 port*4); 4352 attn.sig[2] = REG_RD(bp, 4353 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 4354 port*4); 4355 attn.sig[3] = REG_RD(bp, 4356 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4357 port*4); 4358 4359 if (!CHIP_IS_E1x(bp)) 4360 attn.sig[4] = REG_RD(bp, 4361 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 4362 port*4); 4363 4364 return bnx2x_parity_attn(bp, global, print, attn.sig); 4365 } 4366 4367 4368 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4369 { 4370 u32 val; 4371 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4372 4373 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 4374 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 4375 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 4376 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 4377 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 4378 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 4379 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 4380 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 4381 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 4382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 4383 if (val & 4384 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 4385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 4386 if (val & 4387 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 4388 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 4389 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 4390 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 4391 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 4392 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 4393 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 4394 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 4395 } 4396 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 4397 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 4398 BNX2X_ERR("ATC hw attention 0x%x\n", val); 4399 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 4400 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 4401 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 4402 
BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 4403 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 4404 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 4405 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 4406 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 4407 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 4408 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 4409 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 4410 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 4411 } 4412 4413 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4414 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 4415 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 4416 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4417 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 4418 } 4419 4420 } 4421 4422 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4423 { 4424 struct attn_route attn, *group_mask; 4425 int port = BP_PORT(bp); 4426 int index; 4427 u32 reg_addr; 4428 u32 val; 4429 u32 aeu_mask; 4430 bool global = false; 4431 4432 /* need to take HW lock because MCP or other port might also 4433 try to handle this event */ 4434 bnx2x_acquire_alr(bp); 4435 4436 if (bnx2x_chk_parity_attn(bp, &global, true)) { 4437 #ifndef BNX2X_STOP_ON_ERROR 4438 bp->recovery_state = BNX2X_RECOVERY_INIT; 4439 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4440 /* Disable HW interrupts */ 4441 bnx2x_int_disable(bp); 4442 /* In case of parity errors don't handle attentions so that 4443 * other function would "see" parity errors. 4444 */ 4445 #else 4446 bnx2x_panic(); 4447 #endif 4448 bnx2x_release_alr(bp); 4449 return; 4450 } 4451 4452 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 4453 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 4454 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 4455 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 4456 if (!CHIP_IS_E1x(bp)) 4457 attn.sig[4] = 4458 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 4459 else 4460 attn.sig[4] = 0; 4461 4462 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 4463 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 4464 4465 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4466 if (deasserted & (1 << index)) { 4467 group_mask = &bp->attn_group[index]; 4468 4469 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 4470 index, 4471 group_mask->sig[0], group_mask->sig[1], 4472 group_mask->sig[2], group_mask->sig[3], 4473 group_mask->sig[4]); 4474 4475 bnx2x_attn_int_deasserted4(bp, 4476 attn.sig[4] & group_mask->sig[4]); 4477 bnx2x_attn_int_deasserted3(bp, 4478 attn.sig[3] & group_mask->sig[3]); 4479 bnx2x_attn_int_deasserted1(bp, 4480 attn.sig[1] & group_mask->sig[1]); 4481 bnx2x_attn_int_deasserted2(bp, 4482 attn.sig[2] & group_mask->sig[2]); 4483 bnx2x_attn_int_deasserted0(bp, 4484 attn.sig[0] & group_mask->sig[0]); 4485 } 4486 } 4487 4488 bnx2x_release_alr(bp); 4489 4490 if (bp->common.int_block == INT_BLOCK_HC) 4491 reg_addr = (HC_REG_COMMAND_REG + port*32 + 4492 COMMAND_REG_ATTN_BITS_CLR); 4493 else 4494 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 4495 4496 val = ~deasserted; 4497 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 4498 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 4499 REG_WR(bp, reg_addr, val); 4500 4501 if (~bp->attn_state & deasserted) 4502 BNX2X_ERR("IGU ERROR\n"); 4503 4504 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4505 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4506 4507 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4508 aeu_mask = REG_RD(bp, reg_addr); 4509 4510 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 4511 aeu_mask, deasserted); 4512 aeu_mask |= (deasserted & 0x3ff); 4513 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 4514 4515 REG_WR(bp, reg_addr, aeu_mask); 4516 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4517 4518 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 4519 bp->attn_state &= ~deasserted; 4520 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 4521 } 4522 4523 static void bnx2x_attn_int(struct bnx2x *bp) 4524 { 4525 /* read local copy of bits */ 4526 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 4527 attn_bits); 4528 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 4529 attn_bits_ack); 4530 u32 attn_state = bp->attn_state; 4531 4532 /* look for changed bits */ 4533 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 4534 u32 deasserted = ~attn_bits & attn_ack & attn_state; 4535 4536 DP(NETIF_MSG_HW, 4537 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 4538 attn_bits, attn_ack, asserted, deasserted); 4539 4540 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 4541 BNX2X_ERR("BAD attention state\n"); 4542 4543 /* handle bits that were raised */ 4544 if (asserted) 4545 bnx2x_attn_int_asserted(bp, asserted); 4546 4547 if (deasserted) 4548 bnx2x_attn_int_deasserted(bp, deasserted); 4549 } 4550 4551 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 4552 u16 index, u8 op, u8 update) 4553 { 4554 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 4555 4556 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 4557 igu_addr); 4558 } 4559 4560 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 4561 { 4562 /* No memory barriers */ 4563 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 4564 mmiowb(); /* keep prod updates ordered */ 4565 } 4566 4567 #ifdef BCM_CNIC 4568 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4569 union event_ring_elem *elem) 4570 { 4571 u8 err = elem->message.error; 4572 4573 if (!bp->cnic_eth_dev.starting_cid || 4574 (cid < bp->cnic_eth_dev.starting_cid && 4575 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 4576 return 1; 4577 4578 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 4579 4580 if (unlikely(err)) { 4581 4582 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 4583 cid); 4584 bnx2x_panic_dump(bp); 4585 } 4586 bnx2x_cnic_cfc_comp(bp, cid, err); 4587 return 0; 4588 } 4589 #endif 4590 4591 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4592 { 4593 struct bnx2x_mcast_ramrod_params rparam; 4594 int rc; 4595 4596 memset(&rparam, 0, sizeof(rparam)); 4597 4598 rparam.mcast_obj = &bp->mcast_obj; 4599 4600 netif_addr_lock_bh(bp->dev); 4601 4602 /* Clear pending state for the last command */ 4603 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 4604 4605 /* If there are pending mcast commands - send them */ 4606 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 4607 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 4608 if (rc < 0) 4609 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 4610 rc); 4611 } 4612 4613 netif_addr_unlock_bh(bp->dev); 4614 } 4615 4616 static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 4617 union event_ring_elem *elem) 4618 { 4619 unsigned long ramrod_flags = 0; 4620 int rc = 0; 4621 u32 cid = 
elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4622 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 4623 4624 /* Always push next commands out, don't wait here */ 4625 __set_bit(RAMROD_CONT, &ramrod_flags); 4626 4627 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 4628 case BNX2X_FILTER_MAC_PENDING: 4629 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4630 #ifdef BCM_CNIC 4631 if (cid == BNX2X_ISCSI_ETH_CID(bp)) 4632 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4633 else 4634 #endif 4635 vlan_mac_obj = &bp->fp[cid].mac_obj; 4636 4637 break; 4638 case BNX2X_FILTER_MCAST_PENDING: 4639 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 4640 /* This is only relevant for 57710 where multicast MACs are 4641 * configured as unicast MACs using the same ramrod. 4642 */ 4643 bnx2x_handle_mcast_eqe(bp); 4644 return; 4645 default: 4646 BNX2X_ERR("Unsupported classification command: %d\n", 4647 elem->message.data.eth_event.echo); 4648 return; 4649 } 4650 4651 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 4652 4653 if (rc < 0) 4654 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 4655 else if (rc > 0) 4656 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 4657 4658 } 4659 4660 #ifdef BCM_CNIC 4661 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4662 #endif 4663 4664 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4665 { 4666 netif_addr_lock_bh(bp->dev); 4667 4668 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 4669 4670 /* Send rx_mode command again if was requested */ 4671 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 4672 bnx2x_set_storm_rx_mode(bp); 4673 #ifdef BCM_CNIC 4674 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 4675 &bp->sp_state)) 4676 bnx2x_set_iscsi_eth_rx_mode(bp, true); 4677 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 4678 &bp->sp_state)) 4679 bnx2x_set_iscsi_eth_rx_mode(bp, false); 4680 #endif 4681 4682 netif_addr_unlock_bh(bp->dev); 4683 } 4684 4685 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 4686 union event_ring_elem *elem) 4687 { 4688 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 4689 DP(BNX2X_MSG_SP, 4690 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 4691 elem->message.data.vif_list_event.func_bit_map); 4692 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 4693 elem->message.data.vif_list_event.func_bit_map); 4694 } else if (elem->message.data.vif_list_event.echo == 4695 VIF_LIST_RULE_SET) { 4696 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 4697 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 4698 } 4699 } 4700 4701 /* called with rtnl_lock */ 4702 static void bnx2x_after_function_update(struct bnx2x *bp) 4703 { 4704 int q, rc; 4705 struct bnx2x_fastpath *fp; 4706 struct bnx2x_queue_state_params queue_params = {NULL}; 4707 struct bnx2x_queue_update_params *q_update_params = 4708 &queue_params.params.update; 4709 4710 /* Send Q update command with afex vlan removal values for all Qs */ 4711 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 4712 4713 /* set silent vlan removal values according to vlan mode */ 4714 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 4715 &q_update_params->update_flags); 4716 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 4717 &q_update_params->update_flags); 4718 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 4719 4720 /* in access mode mark mask and value are 0 to strip all vlans */ 4721 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { 
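		/* A zero mask with a zero value matches every VLAN tag
		 * (presumably the firmware tests (tag & mask) == value), so
		 * silent removal strips all VLANs in access mode.
		 */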
		q_update_params->silent_removal_value = 0;
		q_update_params->silent_removal_mask = 0;
	} else {
		q_update_params->silent_removal_value =
			(bp->afex_def_vlan_tag & VLAN_VID_MASK);
		q_update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	for_each_eth_queue(bp, q) {
		/* Set the appropriate Queue object */
		fp = &bp->fp[q];
		queue_params.q_obj = &fp->q_obj;

		/* send the ramrod */
		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  q);
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		fp = &bp->fp[FCOE_IDX(bp)];
		queue_params.q_obj = &fp->q_obj;

		/* clear pending completion bit */
		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

		/* mark latest Q bit */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* send Q update ramrod for FCoE Q */
		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  q);
	} else {
		/* If no FCoE ring - ACK MCP now */
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
#else
	/* If no FCoE ring - ACK MCP now */
	bnx2x_link_report(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
#endif /* BCM_CNIC */
}

static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
	struct bnx2x *bp, u32 cid)
{
	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
	if (cid == BNX2X_FCOE_ETH_CID(bp))
		return &bnx2x_fcoe(bp, q_obj);
	else
#endif
		return &bnx2x_fp(bp, CID_TO_FP(cid, bp), q_obj);
}

static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;
	struct bnx2x_queue_sp_obj *q_obj;
	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get to the next page we need to adjust so that the loop
	 * condition below is met: the next element has the size of a
	 * regular element, hence we increment by 1.
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need for a "paired" read memory
	 * barrier here.
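	 * (In this file bnx2x_eq_int() is called only from bnx2x_sp_task(),
	 * and that work item is not expected to run concurrently with
	 * itself.)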
4808 */ 4809 sw_cons = bp->eq_cons; 4810 sw_prod = bp->eq_prod; 4811 4812 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", 4813 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); 4814 4815 for (; sw_cons != hw_cons; 4816 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 4817 4818 4819 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; 4820 4821 cid = SW_CID(elem->message.data.cfc_del_event.cid); 4822 opcode = elem->message.opcode; 4823 4824 4825 /* handle eq element */ 4826 switch (opcode) { 4827 case EVENT_RING_OPCODE_STAT_QUERY: 4828 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, 4829 "got statistics comp event %d\n", 4830 bp->stats_comp++); 4831 /* nothing to do with stats comp */ 4832 goto next_spqe; 4833 4834 case EVENT_RING_OPCODE_CFC_DEL: 4835 /* handle according to cid range */ 4836 /* 4837 * we may want to verify here that the bp state is 4838 * HALTING 4839 */ 4840 DP(BNX2X_MSG_SP, 4841 "got delete ramrod for MULTI[%d]\n", cid); 4842 #ifdef BCM_CNIC 4843 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 4844 goto next_spqe; 4845 #endif 4846 q_obj = bnx2x_cid_to_q_obj(bp, cid); 4847 4848 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 4849 break; 4850 4851 4852 4853 goto next_spqe; 4854 4855 case EVENT_RING_OPCODE_STOP_TRAFFIC: 4856 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 4857 if (f_obj->complete_cmd(bp, f_obj, 4858 BNX2X_F_CMD_TX_STOP)) 4859 break; 4860 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); 4861 goto next_spqe; 4862 4863 case EVENT_RING_OPCODE_START_TRAFFIC: 4864 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 4865 if (f_obj->complete_cmd(bp, f_obj, 4866 BNX2X_F_CMD_TX_START)) 4867 break; 4868 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4869 goto next_spqe; 4870 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4871 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 4872 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 4873 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE); 4874 4875 /* We will perform the Queues update from sp_rtnl task 4876 * as all Queue SP operations should run under 4877 * rtnl_lock. 4878 */ 4879 smp_mb__before_clear_bit(); 4880 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 4881 &bp->sp_rtnl_state); 4882 smp_mb__after_clear_bit(); 4883 4884 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4885 goto next_spqe; 4886 4887 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 4888 f_obj->complete_cmd(bp, f_obj, 4889 BNX2X_F_CMD_AFEX_VIFLISTS); 4890 bnx2x_after_afex_vif_lists(bp, elem); 4891 goto next_spqe; 4892 case EVENT_RING_OPCODE_FUNCTION_START: 4893 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4894 "got FUNC_START ramrod\n"); 4895 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 4896 break; 4897 4898 goto next_spqe; 4899 4900 case EVENT_RING_OPCODE_FUNCTION_STOP: 4901 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4902 "got FUNC_STOP ramrod\n"); 4903 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 4904 break; 4905 4906 goto next_spqe; 4907 } 4908 4909 switch (opcode | bp->state) { 4910 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 4911 BNX2X_STATE_OPEN): 4912 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 4913 BNX2X_STATE_OPENING_WAIT4_PORT): 4914 cid = elem->message.data.eth_event.echo & 4915 BNX2X_SWCID_MASK; 4916 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", 4917 cid); 4918 rss_raw->clear_pending(rss_raw); 4919 break; 4920 4921 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 4922 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 4923 case (EVENT_RING_OPCODE_SET_MAC | 4924 BNX2X_STATE_CLOSING_WAIT4_HALT): 4925 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4926 BNX2X_STATE_OPEN): 4927 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4928 BNX2X_STATE_DIAG): 4929 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4930 BNX2X_STATE_CLOSING_WAIT4_HALT): 4931 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); 4932 bnx2x_handle_classification_eqe(bp, elem); 4933 break; 4934 4935 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4936 BNX2X_STATE_OPEN): 4937 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4938 BNX2X_STATE_DIAG): 4939 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4940 BNX2X_STATE_CLOSING_WAIT4_HALT): 4941 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); 4942 bnx2x_handle_mcast_eqe(bp); 4943 break; 4944 4945 case (EVENT_RING_OPCODE_FILTERS_RULES | 4946 BNX2X_STATE_OPEN): 4947 case (EVENT_RING_OPCODE_FILTERS_RULES | 4948 BNX2X_STATE_DIAG): 4949 case (EVENT_RING_OPCODE_FILTERS_RULES | 4950 BNX2X_STATE_CLOSING_WAIT4_HALT): 4951 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); 4952 bnx2x_handle_rx_mode_eqe(bp); 4953 break; 4954 default: 4955 /* unknown event log error and continue */ 4956 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", 4957 elem->message.opcode, bp->state); 4958 } 4959 next_spqe: 4960 spqe_cnt++; 4961 } /* for */ 4962 4963 smp_mb__before_atomic_inc(); 4964 atomic_add(spqe_cnt, &bp->eq_spq_left); 4965 4966 bp->eq_cons = sw_cons; 4967 bp->eq_prod = sw_prod; 4968 /* Make sure that above mem writes were issued towards the memory */ 4969 smp_wmb(); 4970 4971 /* update producer */ 4972 bnx2x_update_eq_prod(bp, bp->eq_prod); 4973 } 4974 4975 static void bnx2x_sp_task(struct work_struct *work) 4976 { 4977 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 4978 u16 status; 4979 4980 status = bnx2x_update_dsb_idx(bp); 4981 /* if (status == 0) */ 4982 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ 4983 4984 DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status); 4985 4986 /* HW attentions */ 4987 if (status & BNX2X_DEF_SB_ATT_IDX) { 4988 bnx2x_attn_int(bp); 4989 status &= ~BNX2X_DEF_SB_ATT_IDX; 4990 } 4991 4992 /* SP events: STAT_QUERY and others */ 4993 if (status & BNX2X_DEF_SB_IDX) { 4994 #ifdef BCM_CNIC 4995 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 4996 4997 if ((!NO_FCOE(bp)) && 4998 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 4999 /* 5000 * Prevent local bottom-halves from running as 5001 * we are going to change the local NAPI list. 5002 */ 5003 local_bh_disable(); 5004 napi_schedule(&bnx2x_fcoe(bp, napi)); 5005 local_bh_enable(); 5006 } 5007 #endif 5008 /* Handle EQ completions */ 5009 bnx2x_eq_int(bp); 5010 5011 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 5012 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); 5013 5014 status &= ~BNX2X_DEF_SB_IDX; 5015 } 5016 5017 if (unlikely(status)) 5018 DP(BNX2X_MSG_SP, "got an unknown interrupt! 
(status 0x%x)\n", 5019 status); 5020 5021 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5022 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5023 5024 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5025 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5026 &bp->sp_state)) { 5027 bnx2x_link_report(bp); 5028 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5029 } 5030 } 5031 5032 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5033 { 5034 struct net_device *dev = dev_instance; 5035 struct bnx2x *bp = netdev_priv(dev); 5036 5037 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, 5038 IGU_INT_DISABLE, 0); 5039 5040 #ifdef BNX2X_STOP_ON_ERROR 5041 if (unlikely(bp->panic)) 5042 return IRQ_HANDLED; 5043 #endif 5044 5045 #ifdef BCM_CNIC 5046 { 5047 struct cnic_ops *c_ops; 5048 5049 rcu_read_lock(); 5050 c_ops = rcu_dereference(bp->cnic_ops); 5051 if (c_ops) 5052 c_ops->cnic_handler(bp->cnic_data, NULL); 5053 rcu_read_unlock(); 5054 } 5055 #endif 5056 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 5057 5058 return IRQ_HANDLED; 5059 } 5060 5061 /* end of slow path */ 5062 5063 5064 void bnx2x_drv_pulse(struct bnx2x *bp) 5065 { 5066 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5067 bp->fw_drv_pulse_wr_seq); 5068 } 5069 5070 5071 static void bnx2x_timer(unsigned long data) 5072 { 5073 struct bnx2x *bp = (struct bnx2x *) data; 5074 5075 if (!netif_running(bp->dev)) 5076 return; 5077 5078 if (!BP_NOMCP(bp)) { 5079 int mb_idx = BP_FW_MB_IDX(bp); 5080 u32 drv_pulse; 5081 u32 mcp_pulse; 5082 5083 ++bp->fw_drv_pulse_wr_seq; 5084 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5085 /* TBD - add SYSTEM_TIME */ 5086 drv_pulse = bp->fw_drv_pulse_wr_seq; 5087 bnx2x_drv_pulse(bp); 5088 5089 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5090 MCP_PULSE_SEQ_MASK); 5091 /* The delta between driver pulse and mcp response 5092 * should be 1 (before mcp response) or 0 (after mcp response) 5093 */ 5094 if ((drv_pulse != mcp_pulse) && 5095 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5096 /* someone lost a heartbeat... 
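			 * (any delta other than 0 or 1, e.g. a delta of 2,
			 * means at least one pulse went unanswered)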
*/ 5097 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 5098 drv_pulse, mcp_pulse); 5099 } 5100 } 5101 5102 if (bp->state == BNX2X_STATE_OPEN) 5103 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 5104 5105 mod_timer(&bp->timer, jiffies + bp->current_interval); 5106 } 5107 5108 /* end of Statistics */ 5109 5110 /* nic init */ 5111 5112 /* 5113 * nic init service functions 5114 */ 5115 5116 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5117 { 5118 u32 i; 5119 if (!(len%4) && !(addr%4)) 5120 for (i = 0; i < len; i += 4) 5121 REG_WR(bp, addr + i, fill); 5122 else 5123 for (i = 0; i < len; i++) 5124 REG_WR8(bp, addr + i, fill); 5125 5126 } 5127 5128 /* helper: writes FP SP data to FW - data_size in dwords */ 5129 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5130 int fw_sb_id, 5131 u32 *sb_data_p, 5132 u32 data_size) 5133 { 5134 int index; 5135 for (index = 0; index < data_size; index++) 5136 REG_WR(bp, BAR_CSTRORM_INTMEM + 5137 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 5138 sizeof(u32)*index, 5139 *(sb_data_p + index)); 5140 } 5141 5142 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5143 { 5144 u32 *sb_data_p; 5145 u32 data_size = 0; 5146 struct hc_status_block_data_e2 sb_data_e2; 5147 struct hc_status_block_data_e1x sb_data_e1x; 5148 5149 /* disable the function first */ 5150 if (!CHIP_IS_E1x(bp)) { 5151 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5152 sb_data_e2.common.state = SB_DISABLED; 5153 sb_data_e2.common.p_func.vf_valid = false; 5154 sb_data_p = (u32 *)&sb_data_e2; 5155 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5156 } else { 5157 memset(&sb_data_e1x, 0, 5158 sizeof(struct hc_status_block_data_e1x)); 5159 sb_data_e1x.common.state = SB_DISABLED; 5160 sb_data_e1x.common.p_func.vf_valid = false; 5161 sb_data_p = (u32 *)&sb_data_e1x; 5162 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5163 } 5164 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 5165 5166 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5167 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, 5168 CSTORM_STATUS_BLOCK_SIZE); 5169 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5170 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, 5171 CSTORM_SYNC_BLOCK_SIZE); 5172 } 5173 5174 /* helper: writes SP SB data to FW */ 5175 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5176 struct hc_sp_status_block_data *sp_sb_data) 5177 { 5178 int func = BP_FUNC(bp); 5179 int i; 5180 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 5181 REG_WR(bp, BAR_CSTRORM_INTMEM + 5182 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 5183 i*sizeof(u32), 5184 *((u32 *)sp_sb_data + i)); 5185 } 5186 5187 static void bnx2x_zero_sp_sb(struct bnx2x *bp) 5188 { 5189 int func = BP_FUNC(bp); 5190 struct hc_sp_status_block_data sp_sb_data; 5191 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5192 5193 sp_sb_data.state = SB_DISABLED; 5194 sp_sb_data.p_func.vf_valid = false; 5195 5196 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 5197 5198 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5199 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, 5200 CSTORM_SP_STATUS_BLOCK_SIZE); 5201 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5202 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, 5203 CSTORM_SP_SYNC_BLOCK_SIZE); 5204 5205 } 5206 5207 5208 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 5209 int igu_sb_id, int igu_seg_id) 5210 { 5211 hc_sm->igu_sb_id = igu_sb_id; 5212 hc_sm->igu_seg_id = igu_seg_id; 5213 hc_sm->timer_value = 0xFF; 5214 hc_sm->time_to_expire = 0xFFFFFFFF; 5215 } 5216 5217 
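/* Note: 0xFF and 0xFFFFFFFF above are the largest values the timer fields
 * can hold, i.e. presumably "slowest possible" placeholders until actual
 * coalescing parameters are programmed.
 */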
/* assigns state machine ids to the SB indices */
static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
	/* zero out state machine indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

	/* map indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}

static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_ENABLED;
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_ENABLED;
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
				     u16 tx_usec, u16
rx_usec) 5309 { 5310 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, 5311 false, rx_usec); 5312 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5313 HC_INDEX_ETH_TX_CQ_CONS_COS0, false, 5314 tx_usec); 5315 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5316 HC_INDEX_ETH_TX_CQ_CONS_COS1, false, 5317 tx_usec); 5318 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5319 HC_INDEX_ETH_TX_CQ_CONS_COS2, false, 5320 tx_usec); 5321 } 5322 5323 static void bnx2x_init_def_sb(struct bnx2x *bp) 5324 { 5325 struct host_sp_status_block *def_sb = bp->def_status_blk; 5326 dma_addr_t mapping = bp->def_status_blk_mapping; 5327 int igu_sp_sb_index; 5328 int igu_seg_id; 5329 int port = BP_PORT(bp); 5330 int func = BP_FUNC(bp); 5331 int reg_offset, reg_offset_en5; 5332 u64 section; 5333 int index; 5334 struct hc_sp_status_block_data sp_sb_data; 5335 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5336 5337 if (CHIP_INT_MODE_IS_BC(bp)) { 5338 igu_sp_sb_index = DEF_SB_IGU_ID; 5339 igu_seg_id = HC_SEG_ACCESS_DEF; 5340 } else { 5341 igu_sp_sb_index = bp->igu_dsb_id; 5342 igu_seg_id = IGU_SEG_ACCESS_DEF; 5343 } 5344 5345 /* ATTN */ 5346 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 5347 atten_status_block); 5348 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 5349 5350 bp->attn_state = 0; 5351 5352 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5353 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5354 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 5355 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); 5356 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5357 int sindex; 5358 /* take care of sig[0]..sig[4] */ 5359 for (sindex = 0; sindex < 4; sindex++) 5360 bp->attn_group[index].sig[sindex] = 5361 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); 5362 5363 if (!CHIP_IS_E1x(bp)) 5364 /* 5365 * enable5 is separate from the rest of the registers, 5366 * and therefore the address skip is 4 5367 * and not 16 between the different groups 5368 */ 5369 bp->attn_group[index].sig[4] = REG_RD(bp, 5370 reg_offset_en5 + 0x4*index); 5371 else 5372 bp->attn_group[index].sig[4] = 0; 5373 } 5374 5375 if (bp->common.int_block == INT_BLOCK_HC) { 5376 reg_offset = (port ? 
HC_REG_ATTN_MSG1_ADDR_L :
			      HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.state		= SB_ENABLED;
	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
	sp_sb_data.igu_seg_id		= igu_seg_id;
	sp_sb_data.p_func.pf_id		= func;
	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
	sp_sb_data.p_func.vf_id		= 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->tx_ticks, bp->rx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
					   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
					   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
	/* we want a warning message before it gets rough...
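	 * (the credit below is the smaller of the EQ ring size and the SPQ
	 * room left after reserving MAX_SPQ_PENDING entries, minus one)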
	 */
	atomic_set(&bp->eq_spq_left,
		   min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}

/* called with netif_addr_lock_bh() */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod_param;
	int rc;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Prepare ramrod parameters */
	ramrod_param.cid = 0;
	ramrod_param.cl_id = cl_id;
	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
	ramrod_param.func_id = BP_FUNC(bp);

	ramrod_param.pstate = &bp->sp_state;
	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;

	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	ramrod_param.ramrod_flags = ramrod_flags;
	ramrod_param.rx_mode_flags = rx_mode_flags;

	ramrod_param.rx_accept_flags = rx_accept_flags;
	ramrod_param.tx_accept_flags = tx_accept_flags;

	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
	if (rc < 0) {
		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
		return;
	}
}

/* called with netif_addr_lock_bh() */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;

#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		/* Configure rx_mode of FCoE Queue */
		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
#endif

	switch (bp->rx_mode) {
	case BNX2X_RX_MODE_NONE:
		/*
		 * 'drop all' supersedes any accept flags that may have been
		 * passed to the function.
		 */
		break;
	case BNX2X_RX_MODE_NORMAL:
		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);

		break;
	case BNX2X_RX_MODE_ALLMULTI:
		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);

		break;
	case BNX2X_RX_MODE_PROMISC:
		/* According to the definition of SI mode, an interface in
		 * promisc mode should receive matched and unmatched (in the
		 * resolution of the port) unicast packets.
5533 */ 5534 __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); 5535 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); 5536 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); 5537 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); 5538 5539 /* internal switching mode */ 5540 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); 5541 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); 5542 5543 if (IS_MF_SI(bp)) 5544 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); 5545 else 5546 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); 5547 5548 break; 5549 default: 5550 BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); 5551 return; 5552 } 5553 5554 if (bp->rx_mode != BNX2X_RX_MODE_NONE) { 5555 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); 5556 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); 5557 } 5558 5559 __set_bit(RAMROD_RX, &ramrod_flags); 5560 __set_bit(RAMROD_TX, &ramrod_flags); 5561 5562 bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, 5563 tx_accept_flags, ramrod_flags); 5564 } 5565 5566 static void bnx2x_init_internal_common(struct bnx2x *bp) 5567 { 5568 int i; 5569 5570 if (IS_MF_SI(bp)) 5571 /* 5572 * In switch independent mode, the TSTORM needs to accept 5573 * packets that failed classification, since approximate match 5574 * mac addresses aren't written to NIG LLH 5575 */ 5576 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5577 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); 5578 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ 5579 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5580 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); 5581 5582 /* Zero this manually as its initialization is 5583 currently missing in the initTool */ 5584 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 5585 REG_WR(bp, BAR_USTRORM_INTMEM + 5586 USTORM_AGG_DATA_OFFSET + i * 4, 0); 5587 if (!CHIP_IS_E1x(bp)) { 5588 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, 5589 CHIP_INT_MODE_IS_BC(bp) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
}

static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
}

static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return BP_L_ID(fp->bp) + fp->index;
	else	/* We want Client ID to be the same as IGU SB ID for 57712 */
		return bnx2x_fp_igu_sb_id(fp);
}

static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u8 cos;
	unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
	fp->rx_queue = fp_idx;
	fp->cid = fp_idx;
	fp->cl_id = bnx2x_fp_cl_id(fp);
	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
	/* qZone id equals the FW (per-path) client id */
	fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);

	/* init shortcut */
	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

	/* Set up SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

	/* init tx data */
	for_each_cos_in_tx_queue(fp, cos) {
		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
				  FP_COS_TO_TXQ(fp, cos, bp),
				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
		cids[cos] = fp->txdata_ptr[cos]->cid;
	}

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	/* Configure classification DBs: Always enable Tx switching */
	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}

static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
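	/* Start the queue from a clean state: reset the driver's doorbell
	 * copy and all producer/consumer indices.
	 */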
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}

void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_eth_fp(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

#endif

	/* Initialize MOD_ABS interrupts */
	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
			       bp->common.shmem_base, bp->common.shmem2_base,
			       BP_PORT(bp));
	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	if (bp->strm) {
		vfree(bp->strm->workspace);
		kfree(bp->strm);
		bp->strm = NULL;
	}

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	/* skip the NUL-terminated original file name, if present */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev,
			   "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
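/* In the memory test below each loopback debug packet is counted as 0x10
 * octets by the NIG BRB statistic, so a single packet reads back as 0x10;
 * the NIG counter is not reset between the two parts of the test (see the
 * TODO), so 10 new packets plus the first one read back as 11*0x10 = 0xb0.
 */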
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do I reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
	/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
	/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
	/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
	/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
	/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
	/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
	/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */

	if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);

	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
	/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	u32 val = 0x1400;

	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);

	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
}

static void bnx2x_setup_dmae(struct bnx2x *bp)
{
	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
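/* Enable the SPIO5 fan-failure attention when the shared HW configuration
 * (either an explicit enable or a fan-requiring PHY type) says the board
 * needs it.
 */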
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
	u32 offset = 0;

	if (CHIP_IS_E1(bp))
		return;
	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
		return;

	switch (BP_ABS_FUNC(bp)) {
	case 0:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
		break;
	case 1:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
		break;
	case 2:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
		break;
	case 3:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
		break;
	case 4:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
		break;
	case 5:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
		break;
	case 6:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
		break;
	case 7:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
		break;
	default:
		return;
	}

	REG_WR(bp, offset, pretend_func_num);
	REG_RD(bp, offset);
	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}

void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static void bnx2x__common_init_phy(struct bnx2x *bp)
{
	u32 shmem_base[2], shmem2_base[2];

	shmem_base[0] = bp->common.shmem_base;
	shmem2_base[0] = bp->common.shmem2_base;
	if (!CHIP_IS_E1x(bp)) {
		shmem_base[1] =
			SHMEM2_RD(bp, other_shmem_base_addr);
		shmem2_base[1] =
			SHMEM2_RD(bp, other_shmem2_base_addr);
	}
	bnx2x_acquire_phy_lock(bp);
	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
			      bp->common.chip_id);
	bnx2x_release_phy_lock(bp);
}
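/* HW init is staged: the COMMON phase below runs once per path, then the
 * PORT phase once per port and the FUNCTION phase once per PF; which stages
 * a given driver instance runs is decided by the load_code from the MCP.
 */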
/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));

	/*
	 * take the UNDI lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/*
		 * In 4-port mode or 2-port mode we need to turn off
		 * master-enable for everyone; after that we turn it back on
		 * for self. So we disregard multi-function or not, and always
		 * disable for all functions on the given path. This means
		 * 0, 2, 4, 6 for path 0 and 1, 3, 5, 7 for path 1.
		 */
		for (abs_func_id = BP_PATH(bp);
		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround, E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (!CHIP_IS_E1x(bp)) {
/* In E2 there is a bug in the timers block that can cause function 6 / 7
 * (i.e. vnic3) to start even if it is marked as "scan-off".
 * This occurs when a different function (func2,3) is being marked
 * as "scan-off". Real-life scenario for example: if a driver is being
 * load-unloaded while func6,7 are down. This will cause the timer to access
 * the ilt, translate to a logical address and send a request to read/write.
 * Since the ilt for the function that is down is not valid, this will cause
 * a translation error which is unrecoverable.
 * The Workaround is intended to make sure that when this happens nothing
 * fatal will occur. The workaround:
 *	1.  First PF driver which loads on a path will:
 *		a.  After taking the chip out of reset, by using pretend,
 *		    it will write "0" to the following registers of
 *		    the other vnics.
 *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0);
 *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0);
 *		    And for itself it will write '1' to
 *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 *		    dmae-operations (writing to pram for example.)
 *		    note: can be done for only function 6,7 but cleaner this
 *			  way.
 *		b.  Write zero+valid to the entire ILT.
 *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
 *		    VNIC3 (of that port). The range allocated will be the
 *		    entire ILT. This is needed to prevent ILT range error.
 *	2.  Any PF driver load flow:
 *		a.  ILT update with the physical addresses of the allocated
 *		    logical pages.
 *		b.  Wait 20msec. - note that this timeout is needed to make
 *		    sure there are no requests in one of the PXP internal
 *		    queues with "old" ILT addresses.
 *		c.  PF enable in the PGLC.
 *		d.  Clear the was_error of the PF in the PGLC. (could have
 *		    occurred while driver was down)
 *		e.  PF enable in the CFC (WEAK + STRONG)
 *		f.  Timers scan enable
 *	3.  PF driver unload flow:
 *		a.  Clear the Timers scan_en.
 *		b.  Polling for scan_on=0 for that PF.
 *		c.  Clear the PF enable bit in the PXP.
 *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
 *		e.  Write zero+valid to all ILT entries (The valid bit must
 *		    stay set)
 *		f.  If this is VNIC 3 of a port then also init
 *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
 *		    to the last entry in the ILT.
 *
 *	Notes:
 *	Currently the PF error in the PGLC is non-recoverable.
 *	In the future there will be a recovery routine for this error.
 *	Currently attention is masked.
 *	Having an MCP lock on the load/unload process does not guarantee that
 *	there is no Timer disable during Func6/7 enable. This is because the
 *	Timers scan is currently being cleared by the MCP on FLR.
 *	Step 2.d can be done only for PF6/7 and the driver can also check if
 *	there is error before clearing it. But the flow above is simpler and
 *	more general.
 *	All ILT entries are written by zero+valid and not just PF6/7
 *	ILT entries since in the future the ILT entries allocation for
 *	PF-s might be dynamic.
 */
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;

		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its counterpart are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);

	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
#endif

	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);

	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);

	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
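			/* tag 0 is matched on EtherType 0x8926 (the VN-Tag
			 * EtherType) with a 4-byte tag length
			 */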
			REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
		} else {
			/* Bit-map indicating which L2 hdrs may appear
			 * after the basic Ethernet header
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		/* reset VFC memories */
		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);
		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);

		msleep(20);
	}

	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * sent in afex mode
			 */
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
		} else {
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);

	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);

#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
	if (!CHIP_IS_E1(bp)) {
		/* in E3 this done in per-port section */
		if (!CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		if (CHIP_IS_E1x(bp))
			bnx2x__common_init_phy(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
{
	int rc = bnx2x_init_hw_common(bp);

	if (rc)
		return rc;

	/* In E2 2-PORT mode, same ext phy is used for the two paths */
	if (!BP_NOMCP(bp))
		bnx2x__common_init_phy(bp);

	return 0;
}

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
	u32 low, high;
	u32 val;

	bnx2x__link_reset(bp);

	DP(NETIF_MSG_HW, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	/* Timers bug workaround: the pf_master bit was disabled in pglue at
	 * the common phase, so we need to enable it here before any DMAE
	 * accesses are attempted. Therefore we manually added the
	 * enable-master to the port phase (it also happens in the function
	 * phase)
	 */
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, BLOCK_TM, init_phase);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);

		if (IS_MF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) +
				      ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		REG_WR(bp, (BP_PORT(bp) ?
			    BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);

	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	if (CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure headers for AFEX mode */
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
			       PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_MUST_HAVE_HDRS_PORT_1 :
			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
		} else {
			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode; in switch-independent there
			 * are no ovlan headers
			 */
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
			       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
			       (bp->path_has_ovlan ? 7 : 6));
		}
	}

	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);

	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);

	bnx2x_init_block(bp, BLOCK_PBF, init_phase);

	if (CHIP_IS_E1x(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
#endif
	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, BLOCK_HC, init_phase);

	bnx2x_init_block(bp, BLOCK_IGU, init_phase);

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, BLOCK_NIG, init_phase);

	if (!CHIP_IS_E1x(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header
		 */
		if (IS_MF_AFEX(bp))
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_P1_HDRS_AFTER_BASIC :
			       NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
		else
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_P1_HDRS_AFTER_BASIC :
			       NIG_REG_P0_HDRS_AFTER_BASIC,
			       IS_MF_SD(bp) ? 7 : 6);

		if (CHIP_IS_E3(bp))
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_LLH1_MF_MODE :
			       NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (!CHIP_IS_E3(bp))
		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (!CHIP_IS_E1x(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
			case MULTI_FUNCTION_AFEX:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* If SPIO5 is set to generate interrupts, enable it for this port */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	if (val & (1 << MISC_REGISTERS_SPIO_5)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}

	return 0;
}
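/* Write one ILT line: the 64-bit page address is split into its
 * ONCHIP_ADDR1/ONCHIP_ADDR2 halves and written as a single two-dword
 * DMAE transaction.
 */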
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;
	u32 wb_write[2];

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	wb_write[0] = ONCHIP_ADDR1(addr);
	wb_write[1] = ONCHIP_ADDR2(addr);
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
				   u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
		IGU_REGULAR_CLEANUP_SET				|
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			  ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW,
		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
}

static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);

	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
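/* Per-function (PF) init, run once for every function after the COMMON
 * and PORT phases: FLR cleanup, this function's ILT lines, IGU/HC status
 * block setup and per-PF block configuration.
 */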
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int init_phase = PHASE_PF0 + func;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width, rc;

	DP(NETIF_MSG_HW, "starting func init func %d\n", func);

	/* FLR cleanup - hmmm */
	if (!CHIP_IS_E1x(bp)) {
		rc = bnx2x_pf_flr_clnup(bp);
		if (rc)
			return rc;
	}

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context[i].cxt_mapping;
		ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (!CHIP_IS_E1x(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
	}
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TM, init_phase);
	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, BLOCK_CDU, init_phase);

	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, BLOCK_HC, init_phase);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (!CHIP_IS_E1x(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, BLOCK_IGU, init_phase);

		if (!CHIP_IS_E1x(bp)) {
			int dsb_idx = 0;
			/*
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - in backward compatible mode:
			 * for non default SB, each even line in the memory
			 * holds the U producer and each odd line holds
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_VN(bp);
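			/* The DSB producers sit at a chip-mode dependent
			 * base: IGU_BC_BASE_DSB_PROD in backward compatible
			 * mode, IGU_NORM_BASE_DSB_PROD otherwise.
			 */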
			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * does not matter what the current chip mode is
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(bp)) {
		/* each port gets half of the HC main memory, in dwords */
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Enable STORMs SP logging */
	REG_WR8(bp, BAR_USTRORM_INTMEM +
	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
#endif

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

void bnx2x_free_mem(struct bnx2x *bp)
{
	int i;

	/* fastpath */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	for (i = 0; i < L2_ILT_LINES(bp); i++)
		BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
			       bp->context[i].size);
	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (!CHIP_IS_E1x(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
}
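/* Illustrative sizing (hypothetical numbers): with 4 eth queues and FCoE
 * enabled, fw_stats_num = 2 + 1 + 5 = 8; if STATS_QUERY_CMD_COUNT were 16
 * that would fit in a single stats_query_cmd_group, so num_groups = 1.
 */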
static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
	 * num of queues
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
		     (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
			num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 *
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 *
	 * memory for FCoE offloaded statistics is counted anyway,
	 * even if it will not be sent.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
			bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;

	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);

	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
	int i, allocated, context_size;

#ifdef BCM_CNIC
	if (!CHIP_IS_E1x(bp))
		/* size = the status block + ramrod buffers */
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	/* write address to which L5 should insert its values */
	bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
#endif

	/* Allocate memory for FW statistics */
	if (bnx2x_alloc_fw_stats_mem(bp))
		goto alloc_mem_err;

	/* Allocate memory for CDU context:
	 * This memory is allocated separately and not in the generic ILT
	 * functions because CDU differs in few aspects:
	 * 1. There are multiple entities allocating memory for context -
	 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
	 * its own ILT lines.
	 * 2. Since CDU page-size is not a single 4KB page (which is the case
	 * for the other ILT clients), to be efficient we want to support
	 * allocation of sub-page-size in the last entry.
	 * 3. Context pointers are used by the driver to pass to FW / update
	 * the context (for the other ILT clients the pointers are used just to
	 * free the memory during unload).
	 */
	context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);

	for (i = 0, allocated = 0; allocated < context_size; i++) {
		bp->context[i].size = min(CDU_ILT_PAGE_SZ,
					  (context_size - allocated));
		BNX2X_PCI_ALLOC(bp->context[i].vcxt,
				&bp->context[i].cxt_mapping,
				bp->context[i].size);
		allocated += bp->context[i].size;
	}
	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);

	/* fastpath */
	/* need to be done at the end, since it's self-adjusting to the amount
	 * of memory available for RSS queues
	 */
	if (bnx2x_alloc_fp_mem(bp))
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

/*
 * Init service functions
 */

int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
		      struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Fill general parameters */
	ramrod_param.vlan_mac_obj = obj;
	ramrod_param.ramrod_flags = *ramrod_flags;

	/* Fill a user request section if needed */
	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}

	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc < 0)
		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
	return rc;
}
"Set" : "Del")); 7558 return rc; 7559 } 7560 7561 int bnx2x_del_all_macs(struct bnx2x *bp, 7562 struct bnx2x_vlan_mac_obj *mac_obj, 7563 int mac_type, bool wait_for_comp) 7564 { 7565 int rc; 7566 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 7567 7568 /* Wait for completion of requested */ 7569 if (wait_for_comp) 7570 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 7571 7572 /* Set the mac type of addresses we want to clear */ 7573 __set_bit(mac_type, &vlan_mac_flags); 7574 7575 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); 7576 if (rc < 0) 7577 BNX2X_ERR("Failed to delete MACs: %d\n", rc); 7578 7579 return rc; 7580 } 7581 7582 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) 7583 { 7584 unsigned long ramrod_flags = 0; 7585 7586 #ifdef BCM_CNIC 7587 if (is_zero_ether_addr(bp->dev->dev_addr) && 7588 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 7589 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7590 "Ignoring Zero MAC for STORAGE SD mode\n"); 7591 return 0; 7592 } 7593 #endif 7594 7595 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 7596 7597 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 7598 /* Eth MAC is set on RSS leading client (fp[0]) */ 7599 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, 7600 BNX2X_ETH_MAC, &ramrod_flags); 7601 } 7602 7603 int bnx2x_setup_leading(struct bnx2x *bp) 7604 { 7605 return bnx2x_setup_queue(bp, &bp->fp[0], 1); 7606 } 7607 7608 /** 7609 * bnx2x_set_int_mode - configure interrupt mode 7610 * 7611 * @bp: driver handle 7612 * 7613 * In case of MSI-X it will also try to enable MSI-X. 7614 */ 7615 static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) 7616 { 7617 switch (int_mode) { 7618 case INT_MODE_MSI: 7619 bnx2x_enable_msi(bp); 7620 /* falling through... */ 7621 case INT_MODE_INTx: 7622 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7623 BNX2X_DEV_INFO("set number of queues to 1\n"); 7624 break; 7625 default: 7626 /* Set number of queues for MSI-X mode */ 7627 bnx2x_set_num_queues(bp); 7628 7629 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 7630 7631 /* if we can't use MSI-X we only need one fp, 7632 * so try to enable MSI-X with the requested number of fp's 7633 * and fallback to MSI or legacy INTx with one fp 7634 */ 7635 if (bnx2x_enable_msix(bp) || 7636 bp->flags & USING_SINGLE_MSIX_FLAG) { 7637 /* failed to enable multiple MSI-X */ 7638 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 7639 bp->num_queues, 1 + NON_ETH_CONTEXT_USE); 7640 7641 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7642 7643 /* Try to enable MSI */ 7644 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) && 7645 !(bp->flags & DISABLE_MSI_FLAG)) 7646 bnx2x_enable_msi(bp); 7647 } 7648 break; 7649 } 7650 } 7651 7652 /* must be called prioir to any HW initializations */ 7653 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) 7654 { 7655 return L2_ILT_LINES(bp); 7656 } 7657 7658 void bnx2x_ilt_set_info(struct bnx2x *bp) 7659 { 7660 struct ilt_client_info *ilt_client; 7661 struct bnx2x_ilt *ilt = BP_ILT(bp); 7662 u16 line = 0; 7663 7664 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); 7665 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); 7666 7667 /* CDU */ 7668 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 7669 ilt_client->client_num = ILT_CLIENT_CDU; 7670 ilt_client->page_size = CDU_ILT_PAGE_SZ; 7671 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 7672 ilt_client->start = line; 7673 line += bnx2x_cid_ilt_lines(bp); 7674 #ifdef BCM_CNIC 7675 line += CNIC_ILT_LINES; 7676 #endif 7677 ilt_client->end = line - 
1; 7678 7679 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7680 ilt_client->start, 7681 ilt_client->end, 7682 ilt_client->page_size, 7683 ilt_client->flags, 7684 ilog2(ilt_client->page_size >> 12)); 7685 7686 /* QM */ 7687 if (QM_INIT(bp->qm_cid_count)) { 7688 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 7689 ilt_client->client_num = ILT_CLIENT_QM; 7690 ilt_client->page_size = QM_ILT_PAGE_SZ; 7691 ilt_client->flags = 0; 7692 ilt_client->start = line; 7693 7694 /* 4 bytes for each cid */ 7695 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 7696 QM_ILT_PAGE_SZ); 7697 7698 ilt_client->end = line - 1; 7699 7700 DP(NETIF_MSG_IFUP, 7701 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7702 ilt_client->start, 7703 ilt_client->end, 7704 ilt_client->page_size, 7705 ilt_client->flags, 7706 ilog2(ilt_client->page_size >> 12)); 7707 7708 } 7709 /* SRC */ 7710 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 7711 #ifdef BCM_CNIC 7712 ilt_client->client_num = ILT_CLIENT_SRC; 7713 ilt_client->page_size = SRC_ILT_PAGE_SZ; 7714 ilt_client->flags = 0; 7715 ilt_client->start = line; 7716 line += SRC_ILT_LINES; 7717 ilt_client->end = line - 1; 7718 7719 DP(NETIF_MSG_IFUP, 7720 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7721 ilt_client->start, 7722 ilt_client->end, 7723 ilt_client->page_size, 7724 ilt_client->flags, 7725 ilog2(ilt_client->page_size >> 12)); 7726 7727 #else 7728 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); 7729 #endif 7730 7731 /* TM */ 7732 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 7733 #ifdef BCM_CNIC 7734 ilt_client->client_num = ILT_CLIENT_TM; 7735 ilt_client->page_size = TM_ILT_PAGE_SZ; 7736 ilt_client->flags = 0; 7737 ilt_client->start = line; 7738 line += TM_ILT_LINES; 7739 ilt_client->end = line - 1; 7740 7741 DP(NETIF_MSG_IFUP, 7742 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7743 ilt_client->start, 7744 ilt_client->end, 7745 ilt_client->page_size, 7746 ilt_client->flags, 7747 ilog2(ilt_client->page_size >> 12)); 7748 7749 #else 7750 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); 7751 #endif 7752 BUG_ON(line > ILT_MAX_LINES); 7753 } 7754 7755 /** 7756 * bnx2x_pf_q_prep_init - prepare INIT transition parameters 7757 * 7758 * @bp: driver handle 7759 * @fp: pointer to fastpath 7760 * @init_params: pointer to parameters structure 7761 * 7762 * parameters configured: 7763 * - HC configuration 7764 * - Queue's CDU context 7765 */ 7766 static void bnx2x_pf_q_prep_init(struct bnx2x *bp, 7767 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 7768 { 7769 7770 u8 cos; 7771 int cxt_index, cxt_offset; 7772 7773 /* FCoE Queue uses Default SB, thus has no HC capabilities */ 7774 if (!IS_FCOE_FP(fp)) { 7775 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 7776 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); 7777 7778 /* If HC is supporterd, enable host coalescing in the transition 7779 * to INIT state. 7780 */ 7781 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); 7782 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); 7783 7784 /* HC rate */ 7785 init_params->rx.hc_rate = bp->rx_ticks ? 7786 (1000000 / bp->rx_ticks) : 0; 7787 init_params->tx.hc_rate = bp->tx_ticks ? 
7788 (1000000 / bp->tx_ticks) : 0; 7789 7790 /* FW SB ID */ 7791 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = 7792 fp->fw_sb_id; 7793 7794 /* 7795 * CQ index among the SB indices: FCoE clients uses the default 7796 * SB, therefore it's different. 7797 */ 7798 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 7799 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 7800 } 7801 7802 /* set maximum number of COSs supported by this queue */ 7803 init_params->max_cos = fp->max_cos; 7804 7805 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", 7806 fp->index, init_params->max_cos); 7807 7808 /* set the context pointers queue object */ 7809 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 7810 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; 7811 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * 7812 ILT_PAGE_CIDS); 7813 init_params->cxts[cos] = 7814 &bp->context[cxt_index].vcxt[cxt_offset].eth; 7815 } 7816 } 7817 7818 int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 7819 struct bnx2x_queue_state_params *q_params, 7820 struct bnx2x_queue_setup_tx_only_params *tx_only_params, 7821 int tx_index, bool leading) 7822 { 7823 memset(tx_only_params, 0, sizeof(*tx_only_params)); 7824 7825 /* Set the command */ 7826 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; 7827 7828 /* Set tx-only QUEUE flags: don't zero statistics */ 7829 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); 7830 7831 /* choose the index of the cid to send the slow path on */ 7832 tx_only_params->cid_index = tx_index; 7833 7834 /* Set general TX_ONLY_SETUP parameters */ 7835 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); 7836 7837 /* Set Tx TX_ONLY_SETUP parameters */ 7838 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); 7839 7840 DP(NETIF_MSG_IFUP, 7841 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", 7842 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], 7843 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, 7844 tx_only_params->gen_params.spcl_id, tx_only_params->flags); 7845 7846 /* send the ramrod */ 7847 return bnx2x_queue_state_change(bp, q_params); 7848 } 7849 7850 7851 /** 7852 * bnx2x_setup_queue - setup queue 7853 * 7854 * @bp: driver handle 7855 * @fp: pointer to fastpath 7856 * @leading: is leading 7857 * 7858 * This function performs 2 steps in a Queue state machine 7859 * actually: 1) RESET->INIT 2) INIT->SETUP 7860 */ 7861 7862 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, 7863 bool leading) 7864 { 7865 struct bnx2x_queue_state_params q_params = {NULL}; 7866 struct bnx2x_queue_setup_params *setup_params = 7867 &q_params.params.setup; 7868 struct bnx2x_queue_setup_tx_only_params *tx_only_params = 7869 &q_params.params.tx_only; 7870 int rc; 7871 u8 tx_index; 7872 7873 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); 7874 7875 /* reset IGU state skip FCoE L2 queue */ 7876 if (!IS_FCOE_FP(fp)) 7877 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 7878 IGU_INT_ENABLE, 0); 7879 7880 q_params.q_obj = &fp->q_obj; 7881 /* We want to wait for completion in this context */ 7882 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7883 7884 /* Prepare the INIT parameters */ 7885 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); 7886 7887 /* Set the command */ 7888 q_params.cmd = BNX2X_Q_CMD_INIT; 7889 7890 /* Change the state to INIT */ 7891 rc = bnx2x_queue_state_change(bp, 
&q_params); 7892 if (rc) { 7893 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); 7894 return rc; 7895 } 7896 7897 DP(NETIF_MSG_IFUP, "init complete\n"); 7898 7899 7900 /* Now move the Queue to the SETUP state... */ 7901 memset(setup_params, 0, sizeof(*setup_params)); 7902 7903 /* Set QUEUE flags */ 7904 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); 7905 7906 /* Set general SETUP parameters */ 7907 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, 7908 FIRST_TX_COS_INDEX); 7909 7910 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, 7911 &setup_params->rxq_params); 7912 7913 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, 7914 FIRST_TX_COS_INDEX); 7915 7916 /* Set the command */ 7917 q_params.cmd = BNX2X_Q_CMD_SETUP; 7918 7919 /* Change the state to SETUP */ 7920 rc = bnx2x_queue_state_change(bp, &q_params); 7921 if (rc) { 7922 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); 7923 return rc; 7924 } 7925 7926 /* loop through the relevant tx-only indices */ 7927 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 7928 tx_index < fp->max_cos; 7929 tx_index++) { 7930 7931 /* prepare and send tx-only ramrod*/ 7932 rc = bnx2x_setup_tx_only(bp, fp, &q_params, 7933 tx_only_params, tx_index, leading); 7934 if (rc) { 7935 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", 7936 fp->index, tx_index); 7937 return rc; 7938 } 7939 } 7940 7941 return rc; 7942 } 7943 7944 static int bnx2x_stop_queue(struct bnx2x *bp, int index) 7945 { 7946 struct bnx2x_fastpath *fp = &bp->fp[index]; 7947 struct bnx2x_fp_txdata *txdata; 7948 struct bnx2x_queue_state_params q_params = {NULL}; 7949 int rc, tx_index; 7950 7951 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); 7952 7953 q_params.q_obj = &fp->q_obj; 7954 /* We want to wait for completion in this context */ 7955 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7956 7957 7958 /* close tx-only connections */ 7959 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 7960 tx_index < fp->max_cos; 7961 tx_index++){ 7962 7963 /* ascertain this is a normal queue*/ 7964 txdata = fp->txdata_ptr[tx_index]; 7965 7966 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", 7967 txdata->txq_index); 7968 7969 /* send halt terminate on tx-only connection */ 7970 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 7971 memset(&q_params.params.terminate, 0, 7972 sizeof(q_params.params.terminate)); 7973 q_params.params.terminate.cid_index = tx_index; 7974 7975 rc = bnx2x_queue_state_change(bp, &q_params); 7976 if (rc) 7977 return rc; 7978 7979 /* send halt terminate on tx-only connection */ 7980 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 7981 memset(&q_params.params.cfc_del, 0, 7982 sizeof(q_params.params.cfc_del)); 7983 q_params.params.cfc_del.cid_index = tx_index; 7984 rc = bnx2x_queue_state_change(bp, &q_params); 7985 if (rc) 7986 return rc; 7987 } 7988 /* Stop the primary connection: */ 7989 /* ...halt the connection */ 7990 q_params.cmd = BNX2X_Q_CMD_HALT; 7991 rc = bnx2x_queue_state_change(bp, &q_params); 7992 if (rc) 7993 return rc; 7994 7995 /* ...terminate the connection */ 7996 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 7997 memset(&q_params.params.terminate, 0, 7998 sizeof(q_params.params.terminate)); 7999 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 8000 rc = bnx2x_queue_state_change(bp, &q_params); 8001 if (rc) 8002 return rc; 8003 /* ...delete cfc entry */ 8004 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8005 memset(&q_params.params.cfc_del, 0, 8006 sizeof(q_params.params.cfc_del)); 8007 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 8008 return 
bnx2x_queue_state_change(bp, &q_params); 8009 } 8010 8011 8012 static void bnx2x_reset_func(struct bnx2x *bp) 8013 { 8014 int port = BP_PORT(bp); 8015 int func = BP_FUNC(bp); 8016 int i; 8017 8018 /* Disable the function in the FW */ 8019 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 8020 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 8021 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 8022 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 8023 8024 /* FP SBs */ 8025 for_each_eth_queue(bp, i) { 8026 struct bnx2x_fastpath *fp = &bp->fp[i]; 8027 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8028 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 8029 SB_DISABLED); 8030 } 8031 8032 #ifdef BCM_CNIC 8033 /* CNIC SB */ 8034 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8035 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), 8036 SB_DISABLED); 8037 #endif 8038 /* SP SB */ 8039 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8040 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 8041 SB_DISABLED); 8042 8043 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 8044 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 8045 0); 8046 8047 /* Configure IGU */ 8048 if (bp->common.int_block == INT_BLOCK_HC) { 8049 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 8050 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 8051 } else { 8052 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 8053 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8054 } 8055 8056 #ifdef BCM_CNIC 8057 /* Disable Timer scan */ 8058 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 8059 /* 8060 * Wait for at least 10ms and up to 2 second for the timers scan to 8061 * complete 8062 */ 8063 for (i = 0; i < 200; i++) { 8064 msleep(10); 8065 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8066 break; 8067 } 8068 #endif 8069 /* Clear ILT */ 8070 bnx2x_clear_func_ilt(bp, func); 8071 8072 /* Timers workaround bug for E2: if this is vnic-3, 8073 * we need to set the entire ilt range for this timers. 8074 */ 8075 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { 8076 struct ilt_client_info ilt_cli; 8077 /* use dummy TM client */ 8078 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 8079 ilt_cli.start = 0; 8080 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 8081 ilt_cli.client_num = ILT_CLIENT_TM; 8082 8083 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); 8084 } 8085 8086 /* this assumes that reset_port() called before reset_func()*/ 8087 if (!CHIP_IS_E1x(bp)) 8088 bnx2x_pf_disable(bp); 8089 8090 bp->dmae_ready = 0; 8091 } 8092 8093 static void bnx2x_reset_port(struct bnx2x *bp) 8094 { 8095 int port = BP_PORT(bp); 8096 u32 val; 8097 8098 /* Reset physical Link */ 8099 bnx2x__link_reset(bp); 8100 8101 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 8102 8103 /* Do not rcv packets to BRB */ 8104 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 8105 /* Do not direct rcv packets that are not for MCP to the BRB */ 8106 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 8107 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 8108 8109 /* Configure AEU */ 8110 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 8111 8112 msleep(100); 8113 /* Check for BRB port occupancy */ 8114 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 8115 if (val) 8116 DP(NETIF_MSG_IFDOWN, 8117 "BRB1 is not empty %d blocks are occupied\n", val); 8118 8119 /* TODO: Close Doorbell port? 
*/ 8120 } 8121 8122 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 8123 { 8124 struct bnx2x_func_state_params func_params = {NULL}; 8125 8126 /* Prepare parameters for function state transitions */ 8127 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8128 8129 func_params.f_obj = &bp->func_obj; 8130 func_params.cmd = BNX2X_F_CMD_HW_RESET; 8131 8132 func_params.params.hw_init.load_phase = load_code; 8133 8134 return bnx2x_func_state_change(bp, &func_params); 8135 } 8136 8137 static int bnx2x_func_stop(struct bnx2x *bp) 8138 { 8139 struct bnx2x_func_state_params func_params = {NULL}; 8140 int rc; 8141 8142 /* Prepare parameters for function state transitions */ 8143 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8144 func_params.f_obj = &bp->func_obj; 8145 func_params.cmd = BNX2X_F_CMD_STOP; 8146 8147 /* 8148 * Try to stop the function the 'good way'. If fails (in case 8149 * of a parity error during bnx2x_chip_cleanup()) and we are 8150 * not in a debug mode, perform a state transaction in order to 8151 * enable further HW_RESET transaction. 8152 */ 8153 rc = bnx2x_func_state_change(bp, &func_params); 8154 if (rc) { 8155 #ifdef BNX2X_STOP_ON_ERROR 8156 return rc; 8157 #else 8158 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); 8159 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 8160 return bnx2x_func_state_change(bp, &func_params); 8161 #endif 8162 } 8163 8164 return 0; 8165 } 8166 8167 /** 8168 * bnx2x_send_unload_req - request unload mode from the MCP. 8169 * 8170 * @bp: driver handle 8171 * @unload_mode: requested function's unload mode 8172 * 8173 * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 8174 */ 8175 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) 8176 { 8177 u32 reset_code = 0; 8178 int port = BP_PORT(bp); 8179 8180 /* Select the UNLOAD request mode */ 8181 if (unload_mode == UNLOAD_NORMAL) 8182 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8183 8184 else if (bp->flags & NO_WOL_FLAG) 8185 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 8186 8187 else if (bp->wol) { 8188 u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; 8189 u8 *mac_addr = bp->dev->dev_addr; 8190 u32 val; 8191 u16 pmc; 8192 8193 /* The mac address is written to entries 1-4 to 8194 * preserve entry 0 which is used by the PMF 8195 */ 8196 u8 entry = (BP_VN(bp) + 1)*8; 8197 8198 val = (mac_addr[0] << 8) | mac_addr[1]; 8199 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 8200 8201 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 8202 (mac_addr[4] << 8) | mac_addr[5]; 8203 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 8204 8205 /* Enable the PME and clear the status */ 8206 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); 8207 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 8208 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); 8209 8210 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 8211 8212 } else 8213 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8214 8215 /* Send the request to the MCP */ 8216 if (!BP_NOMCP(bp)) 8217 reset_code = bnx2x_fw_command(bp, reset_code, 0); 8218 else { 8219 int path = BP_PATH(bp); 8220 8221 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", 8222 path, load_count[path][0], load_count[path][1], 8223 load_count[path][2]); 8224 load_count[path][0]--; 8225 load_count[path][1 + port]--; 8226 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", 8227 path, load_count[path][0], load_count[path][1], 8228 load_count[path][2]); 8229 if (load_count[path][0] == 0) 8230 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 8231 else if (load_count[path][1 + port] == 0) 8232 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 8233 else 8234 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 8235 } 8236 8237 return reset_code; 8238 } 8239 8240 /** 8241 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 8242 * 8243 * @bp: driver handle 8244 */ 8245 void bnx2x_send_unload_done(struct bnx2x *bp) 8246 { 8247 /* Report UNLOAD_DONE to MCP */ 8248 if (!BP_NOMCP(bp)) 8249 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 8250 } 8251 8252 static int bnx2x_func_wait_started(struct bnx2x *bp) 8253 { 8254 int tout = 50; 8255 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 8256 8257 if (!bp->port.pmf) 8258 return 0; 8259 8260 /* 8261 * (assumption: No Attention from MCP at this stage) 8262 * PMF probably in the middle of TXdisable/enable transaction 8263 * 1. Sync IRS for default SB 8264 * 2. Sync SP queue - this guarantes us that attention handling started 8265 * 3. Wait, that TXdisable/enable transaction completes 8266 * 8267 * 1+2 guranty that if DCBx attention was scheduled it already changed 8268 * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy 8269 * received complettion for the transaction the state is TX_STOPPED. 8270 * State will return to STARTED after completion of TX_STOPPED-->STARTED 8271 * transaction. 
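	 *
	 * Sketch of the transaction being waited on:
	 *
	 *   STARTED --(DCBx TX disable)--> TX_STOPPED --(TX enable)--> STARTED
	 *
	 * The loop below simply polls bnx2x_func_get_state() until the
	 * function object is back in BNX2X_F_STATE_STARTED or ~1s elapses.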
	 */

	/* make sure the default SB ISR is done */
	if (msix)
		synchronize_irq(bp->msix_table[0].vector);
	else
		synchronize_irq(bp->pdev->irq);

	flush_workqueue(bnx2x_wq);

	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
					BNX2X_F_STATE_STARTED && tout--)
		msleep(20);

	if (bnx2x_func_get_state(bp, &bp->func_obj) !=
					BNX2X_F_STATE_STARTED) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("Wrong function state\n");
		return -EBUSY;
#else
		/*
		 * Failed to complete the transaction in a "good way"
		 * Force both transactions with CLR bit
		 */
		struct bnx2x_func_state_params func_params = {NULL};

		DP(NETIF_MSG_IFDOWN,
		   "Hmmm... unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");

		func_params.f_obj = &bp->func_obj;
		__set_bit(RAMROD_DRV_CLR_ONLY,
			  &func_params.ramrod_flags);

		/* STARTED-->TX_STOPPED */
		func_params.cmd = BNX2X_F_CMD_TX_STOP;
		bnx2x_func_state_change(bp, &func_params);

		/* TX_STOPPED-->STARTED */
		func_params.cmd = BNX2X_F_CMD_TX_START;
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}

void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0;
	u8 cos;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	u32 reset_code;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
#ifdef BNX2X_STOP_ON_ERROR
		if (rc)
			return;
#endif
	}

	/* Give HW time to discard old tx messages */
	usleep_range(1000, 1000);

	/* Clean all ETH MACs */
	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
	if (rc < 0)
		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);

	/* Clean up UC list */
	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
				true);
	if (rc < 0)
		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
			  rc);

	/* Disable LLH */
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	/* Set "drop all" (stop Rx).
	 * We need to take a netif_addr_lock() here in order to prevent
	 * a race between the completion code and this code.
	 */
	netif_addr_lock_bh(bp->dev);
	/* Schedule the rx_mode command */
	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
	else
		bnx2x_set_storm_rx_mode(bp);

	/* Cleanup multicast configuration */
	rparam.mcast_obj = &bp->mcast_obj;
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);

	netif_addr_unlock_bh(bp->dev);



	/*
	 * Send the UNLOAD_REQUEST to the MCP. This will return whether
	 * this function should perform a FUNC, PORT or COMMON HW
	 * reset.
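	 *
	 * (The reply is one of FW_MSG_CODE_DRV_UNLOAD_{FUNCTION,PORT,COMMON};
	 * in the NO_MCP case bnx2x_send_unload_req() above derives the same
	 * answer from the per-path load_count[] bookkeeping.)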
	 */
	reset_code = bnx2x_send_unload_req(bp, unload_mode);

	/*
	 * (assumption: No Attention from MCP at this stage)
	 * The PMF is probably in the middle of a TX disable/enable transaction
	 */
	rc = bnx2x_func_wait_started(bp);
	if (rc) {
		BNX2X_ERR("bnx2x_func_wait_started failed\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#endif
	}

	/* Close multi and leading connections
	 * Completions for ramrods are collected in a synchronous way
	 */
	for_each_queue(bp, i)
		if (bnx2x_stop_queue(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
			return;
#else
			goto unload_error;
#endif
	/* If SP settings didn't get completed so far - something
	 * very wrong has happened.
	 */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
		BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");

#ifndef BNX2X_STOP_ON_ERROR
unload_error:
#endif
	rc = bnx2x_func_stop(bp);
	if (rc) {
		BNX2X_ERR("Function stop failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#endif
	}

	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Reset the chip */
	rc = bnx2x_reset_hw(bp, reset_code);
	if (rc)
		BNX2X_ERR("HW_RESET failed\n");


	/* Report UNLOAD_DONE to MCP */
	bnx2x_send_unload_done(bp);
}

void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
		/* #2 */
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
	}

	/* #3 */
	if (CHIP_IS_E1x(bp)) {
		/* Prevent interrupts from HC on both ports */
		val = REG_RD(bp, HC_REG_CONFIG_1);
		REG_WR(bp, HC_REG_CONFIG_1,
		       (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
		       (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

		val = REG_RD(bp, HC_REG_CONFIG_0);
		REG_WR(bp, HC_REG_CONFIG_0,
		       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
		       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
	} else {
		/* Prevent incoming interrupts in IGU */
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

		REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
		       (!close) ?
		       (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
		       (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
	}

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic...
*/ 8507 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 8508 *magic_val = val & SHARED_MF_CLP_MAGIC; 8509 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 8510 } 8511 8512 /** 8513 * bnx2x_clp_reset_done - restore the value of the `magic' bit. 8514 * 8515 * @bp: driver handle 8516 * @magic_val: old value of the `magic' bit. 8517 */ 8518 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 8519 { 8520 /* Restore the `magic' bit value... */ 8521 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 8522 MF_CFG_WR(bp, shared_mf_config.clp_mb, 8523 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 8524 } 8525 8526 /** 8527 * bnx2x_reset_mcp_prep - prepare for MCP reset. 8528 * 8529 * @bp: driver handle 8530 * @magic_val: old value of 'magic' bit. 8531 * 8532 * Takes care of CLP configurations. 8533 */ 8534 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 8535 { 8536 u32 shmem; 8537 u32 validity_offset; 8538 8539 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); 8540 8541 /* Set `magic' bit in order to save MF config */ 8542 if (!CHIP_IS_E1(bp)) 8543 bnx2x_clp_reset_prep(bp, magic_val); 8544 8545 /* Get shmem offset */ 8546 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 8547 validity_offset = offsetof(struct shmem_region, validity_map[0]); 8548 8549 /* Clear validity map flags */ 8550 if (shmem > 0) 8551 REG_WR(bp, shmem + validity_offset, 0); 8552 } 8553 8554 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 8555 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 8556 8557 /** 8558 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT 8559 * 8560 * @bp: driver handle 8561 */ 8562 static void bnx2x_mcp_wait_one(struct bnx2x *bp) 8563 { 8564 /* special handling for emulation and FPGA, 8565 wait 10 times longer */ 8566 if (CHIP_REV_IS_SLOW(bp)) 8567 msleep(MCP_ONE_TIMEOUT*10); 8568 else 8569 msleep(MCP_ONE_TIMEOUT); 8570 } 8571 8572 /* 8573 * initializes bp->common.shmem_base and waits for validity signature to appear 8574 */ 8575 static int bnx2x_init_shmem(struct bnx2x *bp) 8576 { 8577 int cnt = 0; 8578 u32 val = 0; 8579 8580 do { 8581 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 8582 if (bp->common.shmem_base) { 8583 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 8584 if (val & SHR_MEM_VALIDITY_MB) 8585 return 0; 8586 } 8587 8588 bnx2x_mcp_wait_one(bp); 8589 8590 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 8591 8592 BNX2X_ERR("BAD MCP validity signature\n"); 8593 8594 return -ENODEV; 8595 } 8596 8597 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 8598 { 8599 int rc = bnx2x_init_shmem(bp); 8600 8601 /* Restore the `magic' bit value */ 8602 if (!CHIP_IS_E1(bp)) 8603 bnx2x_clp_reset_done(bp, magic_val); 8604 8605 return rc; 8606 } 8607 8608 static void bnx2x_pxp_prep(struct bnx2x *bp) 8609 { 8610 if (!CHIP_IS_E1(bp)) { 8611 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); 8612 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); 8613 mmiowb(); 8614 } 8615 } 8616 8617 /* 8618 * Reset the whole chip except for: 8619 * - PCIE core 8620 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by 8621 * one reset bit) 8622 * - IGU 8623 * - MISC (including AEU) 8624 * - GRC 8625 * - RBCN, RBCP 8626 */ 8627 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) 8628 { 8629 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 8630 u32 global_bits2, stay_reset2; 8631 8632 /* 8633 * Bits that have to be set in reset_mask2 if we want to reset 'global' 8634 * (per chip) blocks. 
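	 *
	 * For example, during a non-global recovery on an E2 device the code
	 * below starts from reset_mask2 = 0xfffff and then clears these two
	 * MCP_N_RESET_CMN_* bits, so the CPU/core blocks common to both
	 * paths are left running.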
	 */
	global_bits2 =
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

	/* Don't reset the following blocks */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
		MISC_REGISTERS_RESET_REG_2_RST_ATC |
		MISC_REGISTERS_RESET_REG_2_PGLC;

	/*
	 * Keep the following blocks in reset:
	 *  - all xxMACs are handled by the bnx2x_link code.
	 */
	stay_reset2 =
		MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
		MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
		MISC_REGISTERS_RESET_REG_2_UMAC0 |
		MISC_REGISTERS_RESET_REG_2_UMAC1 |
		MISC_REGISTERS_RESET_REG_2_XMAC |
		MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

	/* Full reset masks according to the chip */
	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else if (CHIP_IS_E1H(bp))
		reset_mask2 = 0x1ffff;
	else if (CHIP_IS_E2(bp))
		reset_mask2 = 0xfffff;
	else /* CHIP_IS_E3 */
		reset_mask2 = 0x3ffffff;

	/* Don't reset global blocks unless we need to */
	if (!global)
		reset_mask2 &= ~global_bits2;

	/*
	 * In case of attention in the QM, we need to reset PXP
	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
	 * because otherwise QM reset would release 'close the gates' shortly
	 * before resetting the PXP, then the PSWRQ would send a write
	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
	 * read the payload data from PSWWR, but PSWWR would not
	 * respond. The write queue in PGLUE would get stuck, and dmae
	 * commands would not return. Therefore it's important to reset
	 * the second reset register (containing the
	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
	 * bit).
	 */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       reset_mask2 & (~stay_reset2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	mmiowb();
}

/**
 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
 *
 * @bp:	driver handle
 *
 * It should get cleared in no more than 1s. Returns 0 if the
 * pending writes bit gets cleared.
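 *
 * (A sketch of the contract: the loop below reads
 * IGU_REG_PENDING_BITS_STATUS up to 1000 times with a ~1ms sleep in
 * between, so the effective timeout is about one second.)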
 */
static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 pend_bits = 0;

	do {
		pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);

		if (pend_bits == 0)
			break;

		usleep_range(1000, 1000);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
			  pend_bits);
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_process_kill(struct bnx2x *bp, bool global)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		usleep_range(1000, 1000);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
		BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* Poll for IGU VQs for 57712 and newer chips */
	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
		return -EAGAIN;


	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	usleep_range(1000, 1000);

	/* Prepare for chip reset: */
	/* MCP */
	if (global)
		bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp, global);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (global && bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* TBD: Add resetting the NO_MCP mode DB here */

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions.
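	 *
	 * For reference, the recovery sequence implemented above is:
	 * drain the Tetris buffer -> close gates #2-#4 -> (global only)
	 * prepare MCP/CLP and clear the shmem validity map -> prepare PXP ->
	 * chip reset -> (global only) wait for shmem validity to return ->
	 * reopen the gates.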
*/ 8835 8836 return 0; 8837 } 8838 8839 int bnx2x_leader_reset(struct bnx2x *bp) 8840 { 8841 int rc = 0; 8842 bool global = bnx2x_reset_is_global(bp); 8843 u32 load_code; 8844 8845 /* if not going to reset MCP - load "fake" driver to reset HW while 8846 * driver is owner of the HW 8847 */ 8848 if (!global && !BP_NOMCP(bp)) { 8849 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); 8850 if (!load_code) { 8851 BNX2X_ERR("MCP response failure, aborting\n"); 8852 rc = -EAGAIN; 8853 goto exit_leader_reset; 8854 } 8855 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 8856 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 8857 BNX2X_ERR("MCP unexpected resp, aborting\n"); 8858 rc = -EAGAIN; 8859 goto exit_leader_reset2; 8860 } 8861 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 8862 if (!load_code) { 8863 BNX2X_ERR("MCP response failure, aborting\n"); 8864 rc = -EAGAIN; 8865 goto exit_leader_reset2; 8866 } 8867 } 8868 8869 /* Try to recover after the failure */ 8870 if (bnx2x_process_kill(bp, global)) { 8871 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n", 8872 BP_PATH(bp)); 8873 rc = -EAGAIN; 8874 goto exit_leader_reset2; 8875 } 8876 8877 /* 8878 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver 8879 * state. 8880 */ 8881 bnx2x_set_reset_done(bp); 8882 if (global) 8883 bnx2x_clear_reset_global(bp); 8884 8885 exit_leader_reset2: 8886 /* unload "fake driver" if it was loaded */ 8887 if (!global && !BP_NOMCP(bp)) { 8888 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 8889 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 8890 } 8891 exit_leader_reset: 8892 bp->is_leader = 0; 8893 bnx2x_release_leader_lock(bp); 8894 smp_mb(); 8895 return rc; 8896 } 8897 8898 static void bnx2x_recovery_failed(struct bnx2x *bp) 8899 { 8900 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); 8901 8902 /* Disconnect this device */ 8903 netif_device_detach(bp->dev); 8904 8905 /* 8906 * Block ifup for all function on this engine until "process kill" 8907 * or power cycle. 8908 */ 8909 bnx2x_set_reset_in_progress(bp); 8910 8911 /* Shut down the power */ 8912 bnx2x_set_power_state(bp, PCI_D3hot); 8913 8914 bp->recovery_state = BNX2X_RECOVERY_FAILED; 8915 8916 smp_mb(); 8917 } 8918 8919 /* 8920 * Assumption: runs under rtnl lock. This together with the fact 8921 * that it's called only from bnx2x_sp_rtnl() ensure that it 8922 * will never be called when netif_running(bp->dev) is false. 8923 */ 8924 static void bnx2x_parity_recover(struct bnx2x *bp) 8925 { 8926 bool global = false; 8927 u32 error_recovered, error_unrecovered; 8928 bool is_parity; 8929 8930 DP(NETIF_MSG_HW, "Handling parity\n"); 8931 while (1) { 8932 switch (bp->recovery_state) { 8933 case BNX2X_RECOVERY_INIT: 8934 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); 8935 is_parity = bnx2x_chk_parity_attn(bp, &global, false); 8936 WARN_ON(!is_parity); 8937 8938 /* Try to get a LEADER_LOCK HW lock */ 8939 if (bnx2x_trylock_leader_lock(bp)) { 8940 bnx2x_set_reset_in_progress(bp); 8941 /* 8942 * Check if there is a global attention and if 8943 * there was a global attention, set the global 8944 * reset bit. 
				 */

				if (global)
					bnx2x_set_reset_global(bp);

				bp->is_leader = 1;
			}

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;

			/* Ensure "is_leader", MCP command sequence and
			 * "recovery_state" update values are seen on other
			 * CPUs.
			 */
			smp_mb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				int other_engine = BP_PATH(bp) ? 0 : 1;
				bool other_load_status =
					bnx2x_get_load_status(bp, other_engine);
				bool load_status =
					bnx2x_get_load_status(bp, BP_PATH(bp));
				global = bnx2x_reset_is_global(bp);

				/*
				 * In case of a parity in a global block, let
				 * the first leader that performs a
				 * leader_reset() reset the global blocks in
				 * order to clear global attentions. Otherwise
				 * the gates will remain closed for that
				 * engine.
				 */
				if (load_status ||
				    (global && other_load_status)) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp)) {
						bnx2x_recovery_failed(bp);
						return;
					}

					/* If we are here, it means that the
					 * leader has succeeded and doesn't
					 * want to be a leader any more. Try
					 * to continue as a non-leader.
					 */
					break;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released the leadership for another
					 * reason.
					 */
					if (bnx2x_trylock_leader_lock(bp)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;

				} else {
					/*
					 * If there was a global attention, wait
					 * for it to be cleared.
					 */
					if (bnx2x_reset_is_global(bp)) {
						schedule_delayed_work(
							&bp->sp_rtnl_task,
							HZ/10);
						return;
					}

					error_recovered =
					  bp->eth_stats.recoverable_error;
					error_unrecovered =
					  bp->eth_stats.unrecoverable_error;
					bp->recovery_state =
						BNX2X_RECOVERY_NIC_LOADING;
					if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
						error_unrecovered++;
						netdev_err(bp->dev,
							   "Recovery failed. Power cycle needed\n");
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Shut down the power */
						bnx2x_set_power_state(
								bp, PCI_D3hot);
						smp_mb();
					} else {
						bp->recovery_state =
							BNX2X_RECOVERY_DONE;
						error_recovered++;
						smp_mb();
					}
					bp->eth_stats.recoverable_error =
						error_recovered;
					bp->eth_stats.unrecoverable_error =
						error_unrecovered;

					return;
				}
			}
		default:
			return;
		}
	}
}

static int bnx2x_close(struct net_device *dev);

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
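 *
 * (In other words: re-queueing this task on bnx2x_wq while
 * bnx2x_nic_unload() is flushing that same workqueue would
 * self-deadlock, hence the use of schedule_delayed_work(), which
 * targets the system workqueue.)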
 */
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto sp_rtnl_exit;

	/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
		  "you will need to reboot when done\n");
	goto sp_rtnl_not_reset;
#endif

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		bnx2x_parity_recover(bp);

		goto sp_rtnl_exit;
	}

	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);

		goto sp_rtnl_exit;
	}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
#endif
	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
		bnx2x_after_function_update(bp);
	/*
	 * in case of fan failure we need to reset it even when the
	 * "stop on error" debug flag is set, since we are trying to prevent
	 * permanent overheating damage
	 */
	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
		DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
		netif_device_detach(bp->dev);
		bnx2x_close(bp->dev);
	}

sp_rtnl_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

static void bnx2x_period_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);

	if (!netif_running(bp->dev))
		goto period_task_exit;

	if (CHIP_REV_IS_SLOW(bp)) {
		BNX2X_ERR("period task called on emulation, ignoring\n");
		goto period_task_exit;
	}

	bnx2x_acquire_phy_lock(bp);
	/*
	 * The barrier is needed to ensure the ordering between the writing to
	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
	 * the reading here.
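	 *
	 * (Only the PMF re-queues the task below; for a non-PMF function
	 * the task simply terminates here without re-arming itself.)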
9167 */ 9168 smp_mb(); 9169 if (bp->port.pmf) { 9170 bnx2x_period_func(&bp->link_params, &bp->link_vars); 9171 9172 /* Re-queue task in 1 sec */ 9173 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); 9174 } 9175 9176 bnx2x_release_phy_lock(bp); 9177 period_task_exit: 9178 return; 9179 } 9180 9181 /* 9182 * Init service functions 9183 */ 9184 9185 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) 9186 { 9187 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; 9188 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; 9189 return base + (BP_ABS_FUNC(bp)) * stride; 9190 } 9191 9192 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) 9193 { 9194 u32 reg = bnx2x_get_pretend_reg(bp); 9195 9196 /* Flush all outstanding writes */ 9197 mmiowb(); 9198 9199 /* Pretend to be function 0 */ 9200 REG_WR(bp, reg, 0); 9201 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ 9202 9203 /* From now we are in the "like-E1" mode */ 9204 bnx2x_int_disable(bp); 9205 9206 /* Flush all outstanding writes */ 9207 mmiowb(); 9208 9209 /* Restore the original function */ 9210 REG_WR(bp, reg, BP_ABS_FUNC(bp)); 9211 REG_RD(bp, reg); 9212 } 9213 9214 static inline void bnx2x_undi_int_disable(struct bnx2x *bp) 9215 { 9216 if (CHIP_IS_E1(bp)) 9217 bnx2x_int_disable(bp); 9218 else 9219 bnx2x_undi_int_disable_e1h(bp); 9220 } 9221 9222 static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp) 9223 { 9224 u32 val, base_addr, offset, mask, reset_reg; 9225 bool mac_stopped = false; 9226 u8 port = BP_PORT(bp); 9227 9228 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 9229 9230 if (!CHIP_IS_E3(bp)) { 9231 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 9232 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 9233 if ((mask & reset_reg) && val) { 9234 u32 wb_data[2]; 9235 BNX2X_DEV_INFO("Disable bmac Rx\n"); 9236 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM 9237 : NIG_REG_INGRESS_BMAC0_MEM; 9238 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL 9239 : BIGMAC_REGISTER_BMAC_CONTROL; 9240 9241 /* 9242 * use rd/wr since we cannot use dmae. This is safe 9243 * since MCP won't access the bus due to the request 9244 * to unload, and no function on the path can be 9245 * loaded at this time. 9246 */ 9247 wb_data[0] = REG_RD(bp, base_addr + offset); 9248 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); 9249 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 9250 REG_WR(bp, base_addr + offset, wb_data[0]); 9251 REG_WR(bp, base_addr + offset + 0x4, wb_data[1]); 9252 9253 } 9254 BNX2X_DEV_INFO("Disable emac Rx\n"); 9255 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0); 9256 9257 mac_stopped = true; 9258 } else { 9259 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 9260 BNX2X_DEV_INFO("Disable xmac Rx\n"); 9261 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 9262 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); 9263 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 9264 val & ~(1 << 1)); 9265 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 9266 val | (1 << 1)); 9267 REG_WR(bp, base_addr + XMAC_REG_CTRL, 0); 9268 mac_stopped = true; 9269 } 9270 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 9271 if (mask & reset_reg) { 9272 BNX2X_DEV_INFO("Disable umac Rx\n"); 9273 base_addr = BP_PORT(bp) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0;
			REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
			mac_stopped = true;
		}
	}

	if (mac_stopped)
		msleep(20);

}

#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
#define BNX2X_PREV_UNDI_RCQ(val)	((val) & 0xffff)
#define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq))

static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
						 u8 inc)
{
	u16 rcq, bd;
	u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));

	rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
	bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;

	tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
	REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);

	BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
		       port, bd, rcq);
}

static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
{
	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	if (!rc) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	return 0;
}

static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
	struct bnx2x_prev_path_list *tmp_list;
	int rc = false;

	if (down_trylock(&bnx2x_prev_sem))
		return false;

	list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
		if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
		    bp->pdev->bus->number == tmp_list->bus &&
		    BP_PATH(bp) == tmp_list->path) {
			rc = true;
			BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
				       BP_PATH(bp));
			break;
		}
	}

	up(&bnx2x_prev_sem);

	return rc;
}

static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
{
	struct bnx2x_prev_path_list *tmp_list;
	int rc;

	tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
	if (!tmp_list) {
		BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
		return -ENOMEM;
	}

	tmp_list->bus = bp->pdev->bus->number;
	tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
	tmp_list->path = BP_PATH(bp);

	rc = down_interruptible(&bnx2x_prev_sem);
	if (rc) {
		BNX2X_ERR("Received %d when tried to take lock\n", rc);
		kfree(tmp_list);
	} else {
		BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
			       BP_PATH(bp));
		list_add(&tmp_list->list, &bnx2x_prev_list);
		up(&bnx2x_prev_sem);
	}

	return rc;
}

static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
{
	int pos;
	u32 cap;
	struct pci_dev *dev = bp->pdev;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return false;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return false;

	return true;
}

static int __devinit bnx2x_do_flr(struct bnx2x *bp)
{
	int i, pos;
	u16 status;
	struct pci_dev *dev = bp->pdev;

	/* probe the capability first */
	if (!bnx2x_can_flr(bp))
		return -ENOTTY;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA,
&status); 9407 if (!(status & PCI_EXP_DEVSTA_TRPND)) 9408 goto clear; 9409 } 9410 9411 dev_err(&dev->dev, 9412 "transaction is not cleared; proceeding with reset anyway\n"); 9413 9414 clear: 9415 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 9416 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", 9417 bp->common.bc_ver); 9418 return -EINVAL; 9419 } 9420 9421 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 9422 9423 return 0; 9424 } 9425 9426 static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) 9427 { 9428 int rc; 9429 9430 BNX2X_DEV_INFO("Uncommon unload Flow\n"); 9431 9432 /* Test if previous unload process was already finished for this path */ 9433 if (bnx2x_prev_is_path_marked(bp)) 9434 return bnx2x_prev_mcp_done(bp); 9435 9436 /* If function has FLR capabilities, and existing FW version matches 9437 * the one required, then FLR will be sufficient to clean any residue 9438 * left by previous driver 9439 */ 9440 if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) 9441 return bnx2x_do_flr(bp); 9442 9443 /* Close the MCP request, return failure */ 9444 rc = bnx2x_prev_mcp_done(bp); 9445 if (!rc) 9446 rc = BNX2X_PREV_WAIT_NEEDED; 9447 9448 return rc; 9449 } 9450 9451 static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) 9452 { 9453 u32 reset_reg, tmp_reg = 0, rc; 9454 /* It is possible a previous function received the 'common' answer, 9455 * but hasn't loaded yet, therefore creating a scenario of 9456 * multiple functions receiving 'common' on the same path. 9457 */ 9458 BNX2X_DEV_INFO("Common unload Flow\n"); 9459 9460 if (bnx2x_prev_is_path_marked(bp)) 9461 return bnx2x_prev_mcp_done(bp); 9462 9463 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); 9464 9465 /* Reset should be performed after BRB is emptied */ 9466 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 9467 u32 timer_count = 1000; 9468 bool prev_undi = false; 9469 9470 /* Close the MAC Rx to prevent BRB from filling up */ 9471 bnx2x_prev_unload_close_mac(bp); 9472 9473 /* Check if the UNDI driver was previously loaded 9474 * UNDI driver initializes CID offset for normal bell to 0x7 9475 */ 9476 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); 9477 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 9478 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 9479 if (tmp_reg == 0x7) { 9480 BNX2X_DEV_INFO("UNDI previously loaded\n"); 9481 prev_undi = true; 9482 /* clear the UNDI indication */ 9483 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); 9484 } 9485 } 9486 /* wait until BRB is empty */ 9487 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 9488 while (timer_count) { 9489 u32 prev_brb = tmp_reg; 9490 9491 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 9492 if (!tmp_reg) 9493 break; 9494 9495 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); 9496 9497 /* reset timer as long as BRB actually gets emptied */ 9498 if (prev_brb > tmp_reg) 9499 timer_count = 1000; 9500 else 9501 timer_count--; 9502 9503 /* If UNDI resides in memory, manually increment it */ 9504 if (prev_undi) 9505 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); 9506 9507 udelay(10); 9508 } 9509 9510 if (!timer_count) 9511 BNX2X_ERR("Failed to empty BRB, hope for the best\n"); 9512 9513 } 9514 9515 /* No packets are in the pipeline, path is ready for reset */ 9516 bnx2x_reset_common(bp); 9517 9518 rc = bnx2x_prev_mark_path(bp); 9519 if (rc) { 9520 bnx2x_prev_mcp_done(bp); 9521 return rc; 9522 } 9523 9524 return bnx2x_prev_mcp_done(bp); 9525 } 9526 9527 /* A previous driver DMAE transaction may have occurred when the pre-boot stage 9528 * ended and boot began, or when a kdump kernel was loaded. Either case would 9529 * invalidate the addresses of the transaction, resulting in the was-error bit 9530 * being set in the PCI block and causing all hw-to-host PCIe transactions to 9531 * time out. If this happened, we want to clear from the PGLUE block both the 9532 * interrupt which detected it and the was-error bit 9533 */ 9534 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) 9535 { 9536 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); 9537 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 9538 BNX2X_ERR("was-error bit was found to be set in pglueb upon startup - clearing\n"); 9539 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); 9540 } 9541 } 9542 9543 static int __devinit bnx2x_prev_unload(struct bnx2x *bp) 9544 { 9545 int time_counter = 10; 9546 u32 rc, fw, hw_lock_reg, hw_lock_val; 9547 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 9548 9549 /* clear hw from errors which may have resulted from an interrupted 9550 * dmae transaction. 9551 */ 9552 bnx2x_prev_interrupted_dmae(bp); 9553 9554 /* Release previously held locks */ 9555 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 9556 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 9557 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 9558 9559 hw_lock_val = (REG_RD(bp, hw_lock_reg)); 9560 if (hw_lock_val) { 9561 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 9562 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); 9563 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 9564 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); 9565 } 9566 9567 BNX2X_DEV_INFO("Release Previously held hw lock\n"); 9568 REG_WR(bp, hw_lock_reg, 0xffffffff); 9569 } else 9570 BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); 9571 9572 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { 9573 BNX2X_DEV_INFO("Release previously held alr\n"); 9574 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 9575 } 9576 9577 9578 do { 9579 /* Lock MCP using an unload request */ 9580 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 9581 if (!fw) { 9582 BNX2X_ERR("MCP response failure, aborting\n"); 9583 rc = -EBUSY; 9584 break; 9585 } 9586 9587 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 9588 rc = bnx2x_prev_unload_common(bp); 9589 break; 9590 } 9591 9592 /* non-common reply from MCP might require looping */ 9593 rc = bnx2x_prev_unload_uncommon(bp); 9594 if (rc != BNX2X_PREV_WAIT_NEEDED) 9595 break; 9596 9597 msleep(20); 9598 } while (--time_counter); 9599 9600 if (!time_counter || rc) { 9601 BNX2X_ERR("Failed unloading previous driver, aborting\n"); 9602 rc = -EBUSY; 9603 } 9604 9605 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 9606 9607 return rc; 9608 } 9609 9610 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 9611 { 9612 u32 val, val2, val3, val4, id, boot_mode; 9613 u16 pmc; 9614 9615 /* Get the chip revision id and number.
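 * As a purely illustrative example (values assumed, not read from real
 * hardware): a part with chip num 0x164e, rev 0, metal 0 and bond_id 0
 * would pack into a chip_id of 0x164e0000 via the shifts below.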
*/ 9616 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 9617 val = REG_RD(bp, MISC_REG_CHIP_NUM); 9618 id = ((val & 0xffff) << 16); 9619 val = REG_RD(bp, MISC_REG_CHIP_REV); 9620 id |= ((val & 0xf) << 12); 9621 val = REG_RD(bp, MISC_REG_CHIP_METAL); 9622 id |= ((val & 0xff) << 4); 9623 val = REG_RD(bp, MISC_REG_BOND_ID); 9624 id |= (val & 0xf); 9625 bp->common.chip_id = id; 9626 9627 /* force 57811 according to MISC register */ 9628 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 9629 if (CHIP_IS_57810(bp)) 9630 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 9631 (bp->common.chip_id & 0x0000FFFF); 9632 else if (CHIP_IS_57810_MF(bp)) 9633 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 9634 (bp->common.chip_id & 0x0000FFFF); 9635 bp->common.chip_id |= 0x1; 9636 } 9637 9638 /* Set doorbell size */ 9639 bp->db_size = (1 << BNX2X_DB_SHIFT); 9640 9641 if (!CHIP_IS_E1x(bp)) { 9642 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 9643 if ((val & 1) == 0) 9644 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 9645 else 9646 val = (val >> 1) & 1; 9647 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 9648 "2_PORT_MODE"); 9649 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : 9650 CHIP_2_PORT_MODE; 9651 9652 if (CHIP_MODE_IS_4_PORT(bp)) 9653 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 9654 else 9655 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 9656 } else { 9657 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 9658 bp->pfid = bp->pf_num; /* 0..7 */ 9659 } 9660 9661 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); 9662 9663 bp->link_params.chip_id = bp->common.chip_id; 9664 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 9665 9666 val = (REG_RD(bp, 0x2874) & 0x55); 9667 if ((bp->common.chip_id & 0x1) || 9668 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 9669 bp->flags |= ONE_PORT_FLAG; 9670 BNX2X_DEV_INFO("single port device\n"); 9671 } 9672 9673 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 9674 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 9675 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 9676 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 9677 bp->common.flash_size, bp->common.flash_size); 9678 9679 bnx2x_init_shmem(bp); 9680 9681 9682 9683 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
9684 MISC_REG_GENERIC_CR_1 : 9685 MISC_REG_GENERIC_CR_0)); 9686 9687 bp->link_params.shmem_base = bp->common.shmem_base; 9688 bp->link_params.shmem2_base = bp->common.shmem2_base; 9689 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 9690 bp->common.shmem_base, bp->common.shmem2_base); 9691 9692 if (!bp->common.shmem_base) { 9693 BNX2X_DEV_INFO("MCP not active\n"); 9694 bp->flags |= NO_MCP_FLAG; 9695 return; 9696 } 9697 9698 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 9699 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 9700 9701 bp->link_params.hw_led_mode = ((bp->common.hw_config & 9702 SHARED_HW_CFG_LED_MODE_MASK) >> 9703 SHARED_HW_CFG_LED_MODE_SHIFT); 9704 9705 bp->link_params.feature_config_flags = 0; 9706 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 9707 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 9708 bp->link_params.feature_config_flags |= 9709 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 9710 else 9711 bp->link_params.feature_config_flags &= 9712 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 9713 9714 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 9715 bp->common.bc_ver = val; 9716 BNX2X_DEV_INFO("bc_ver %X\n", val); 9717 if (val < BNX2X_BC_VER) { 9718 /* for now only warn 9719 * later we might need to enforce this */ 9720 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 9721 BNX2X_BC_VER, val); 9722 } 9723 bp->link_params.feature_config_flags |= 9724 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 9725 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 9726 9727 bp->link_params.feature_config_flags |= 9728 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 9729 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 9730 bp->link_params.feature_config_flags |= 9731 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 9732 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 9733 bp->link_params.feature_config_flags |= 9734 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 9735 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 9736 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 9737 BC_SUPPORTS_PFC_STATS : 0; 9738 9739 boot_mode = SHMEM_RD(bp, 9740 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 9741 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 9742 switch (boot_mode) { 9743 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: 9744 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; 9745 break; 9746 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: 9747 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; 9748 break; 9749 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: 9750 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; 9751 break; 9752 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: 9753 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; 9754 break; 9755 } 9756 9757 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 9758 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 9759 9760 BNX2X_DEV_INFO("%sWoL capable\n", 9761 (bp->flags & NO_WOL_FLAG) ? 
"not " : ""); 9762 9763 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 9764 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 9765 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 9766 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 9767 9768 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", 9769 val, val2, val3, val4); 9770 } 9771 9772 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 9773 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 9774 9775 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 9776 { 9777 int pfid = BP_FUNC(bp); 9778 int igu_sb_id; 9779 u32 val; 9780 u8 fid, igu_sb_cnt = 0; 9781 9782 bp->igu_base_sb = 0xff; 9783 if (CHIP_INT_MODE_IS_BC(bp)) { 9784 int vn = BP_VN(bp); 9785 igu_sb_cnt = bp->igu_sb_cnt; 9786 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 9787 FP_SB_MAX_E1x; 9788 9789 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + 9790 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); 9791 9792 return; 9793 } 9794 9795 /* IGU in normal mode - read CAM */ 9796 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 9797 igu_sb_id++) { 9798 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 9799 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 9800 continue; 9801 fid = IGU_FID(val); 9802 if ((fid & IGU_FID_ENCODE_IS_PF)) { 9803 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) 9804 continue; 9805 if (IGU_VEC(val) == 0) 9806 /* default status block */ 9807 bp->igu_dsb_id = igu_sb_id; 9808 else { 9809 if (bp->igu_base_sb == 0xff) 9810 bp->igu_base_sb = igu_sb_id; 9811 igu_sb_cnt++; 9812 } 9813 } 9814 } 9815 9816 #ifdef CONFIG_PCI_MSI 9817 /* 9818 * It's expected that number of CAM entries for this functions is equal 9819 * to the number evaluated based on the MSI-X table size. We want a 9820 * harsh warning if these values are different! 9821 */ 9822 WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); 9823 #endif 9824 9825 if (igu_sb_cnt == 0) 9826 BNX2X_ERR("CAM configuration error\n"); 9827 } 9828 9829 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 9830 u32 switch_cfg) 9831 { 9832 int cfg_size = 0, idx, port = BP_PORT(bp); 9833 9834 /* Aggregation of supported attributes of all external phys */ 9835 bp->port.supported[0] = 0; 9836 bp->port.supported[1] = 0; 9837 switch (bp->link_params.num_phys) { 9838 case 1: 9839 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; 9840 cfg_size = 1; 9841 break; 9842 case 2: 9843 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; 9844 cfg_size = 1; 9845 break; 9846 case 3: 9847 if (bp->link_params.multi_phy_config & 9848 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 9849 bp->port.supported[1] = 9850 bp->link_params.phy[EXT_PHY1].supported; 9851 bp->port.supported[0] = 9852 bp->link_params.phy[EXT_PHY2].supported; 9853 } else { 9854 bp->port.supported[0] = 9855 bp->link_params.phy[EXT_PHY1].supported; 9856 bp->port.supported[1] = 9857 bp->link_params.phy[EXT_PHY2].supported; 9858 } 9859 cfg_size = 2; 9860 break; 9861 } 9862 9863 if (!(bp->port.supported[0] || bp->port.supported[1])) { 9864 BNX2X_ERR("NVRAM config error. BAD phy config. 
PHY1 config 0x%x, PHY2 config 0x%x\n", 9865 SHMEM_RD(bp, 9866 dev_info.port_hw_config[port].external_phy_config), 9867 SHMEM_RD(bp, 9868 dev_info.port_hw_config[port].external_phy_config2)); 9869 return; 9870 } 9871 9872 if (CHIP_IS_E3(bp)) 9873 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 9874 else { 9875 switch (switch_cfg) { 9876 case SWITCH_CFG_1G: 9877 bp->port.phy_addr = REG_RD( 9878 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 9879 break; 9880 case SWITCH_CFG_10G: 9881 bp->port.phy_addr = REG_RD( 9882 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 9883 break; 9884 default: 9885 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 9886 bp->port.link_config[0]); 9887 return; 9888 } 9889 } 9890 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 9891 /* mask what we support according to speed_cap_mask per configuration */ 9892 for (idx = 0; idx < cfg_size; idx++) { 9893 if (!(bp->link_params.speed_cap_mask[idx] & 9894 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 9895 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 9896 9897 if (!(bp->link_params.speed_cap_mask[idx] & 9898 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 9899 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 9900 9901 if (!(bp->link_params.speed_cap_mask[idx] & 9902 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 9903 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 9904 9905 if (!(bp->link_params.speed_cap_mask[idx] & 9906 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 9907 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 9908 9909 if (!(bp->link_params.speed_cap_mask[idx] & 9910 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 9911 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 9912 SUPPORTED_1000baseT_Full); 9913 9914 if (!(bp->link_params.speed_cap_mask[idx] & 9915 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 9916 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 9917 9918 if (!(bp->link_params.speed_cap_mask[idx] & 9919 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 9920 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; 9921 9922 } 9923 9924 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 9925 bp->port.supported[1]); 9926 } 9927 9928 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) 9929 { 9930 u32 link_config, idx, cfg_size = 0; 9931 bp->port.advertising[0] = 0; 9932 bp->port.advertising[1] = 0; 9933 switch (bp->link_params.num_phys) { 9934 case 1: 9935 case 2: 9936 cfg_size = 1; 9937 break; 9938 case 3: 9939 cfg_size = 2; 9940 break; 9941 } 9942 for (idx = 0; idx < cfg_size; idx++) { 9943 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 9944 link_config = bp->port.link_config[idx]; 9945 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 9946 case PORT_FEATURE_LINK_SPEED_AUTO: 9947 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 9948 bp->link_params.req_line_speed[idx] = 9949 SPEED_AUTO_NEG; 9950 bp->port.advertising[idx] |= 9951 bp->port.supported[idx]; 9952 if (bp->link_params.phy[EXT_PHY1].type == 9953 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 9954 bp->port.advertising[idx] |= 9955 (SUPPORTED_100baseT_Half | 9956 SUPPORTED_100baseT_Full); 9957 } else { 9958 /* force 10G, no AN */ 9959 bp->link_params.req_line_speed[idx] = 9960 SPEED_10000; 9961 bp->port.advertising[idx] |= 9962 (ADVERTISED_10000baseT_Full | 9963 ADVERTISED_FIBRE); 9964 continue; 9965 } 9966 break; 9967 9968 case PORT_FEATURE_LINK_SPEED_10M_FULL: 9969 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 9970 bp->link_params.req_line_speed[idx] = 9971 SPEED_10; 9972 bp->port.advertising[idx] |= 9973 
(ADVERTISED_10baseT_Full | 9974 ADVERTISED_TP); 9975 } else { 9976 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9977 link_config, 9978 bp->link_params.speed_cap_mask[idx]); 9979 return; 9980 } 9981 break; 9982 9983 case PORT_FEATURE_LINK_SPEED_10M_HALF: 9984 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 9985 bp->link_params.req_line_speed[idx] = 9986 SPEED_10; 9987 bp->link_params.req_duplex[idx] = 9988 DUPLEX_HALF; 9989 bp->port.advertising[idx] |= 9990 (ADVERTISED_10baseT_Half | 9991 ADVERTISED_TP); 9992 } else { 9993 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9994 link_config, 9995 bp->link_params.speed_cap_mask[idx]); 9996 return; 9997 } 9998 break; 9999 10000 case PORT_FEATURE_LINK_SPEED_100M_FULL: 10001 if (bp->port.supported[idx] & 10002 SUPPORTED_100baseT_Full) { 10003 bp->link_params.req_line_speed[idx] = 10004 SPEED_100; 10005 bp->port.advertising[idx] |= 10006 (ADVERTISED_100baseT_Full | 10007 ADVERTISED_TP); 10008 } else { 10009 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10010 link_config, 10011 bp->link_params.speed_cap_mask[idx]); 10012 return; 10013 } 10014 break; 10015 10016 case PORT_FEATURE_LINK_SPEED_100M_HALF: 10017 if (bp->port.supported[idx] & 10018 SUPPORTED_100baseT_Half) { 10019 bp->link_params.req_line_speed[idx] = 10020 SPEED_100; 10021 bp->link_params.req_duplex[idx] = 10022 DUPLEX_HALF; 10023 bp->port.advertising[idx] |= 10024 (ADVERTISED_100baseT_Half | 10025 ADVERTISED_TP); 10026 } else { 10027 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10028 link_config, 10029 bp->link_params.speed_cap_mask[idx]); 10030 return; 10031 } 10032 break; 10033 10034 case PORT_FEATURE_LINK_SPEED_1G: 10035 if (bp->port.supported[idx] & 10036 SUPPORTED_1000baseT_Full) { 10037 bp->link_params.req_line_speed[idx] = 10038 SPEED_1000; 10039 bp->port.advertising[idx] |= 10040 (ADVERTISED_1000baseT_Full | 10041 ADVERTISED_TP); 10042 } else { 10043 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10044 link_config, 10045 bp->link_params.speed_cap_mask[idx]); 10046 return; 10047 } 10048 break; 10049 10050 case PORT_FEATURE_LINK_SPEED_2_5G: 10051 if (bp->port.supported[idx] & 10052 SUPPORTED_2500baseX_Full) { 10053 bp->link_params.req_line_speed[idx] = 10054 SPEED_2500; 10055 bp->port.advertising[idx] |= 10056 (ADVERTISED_2500baseX_Full | 10057 ADVERTISED_TP); 10058 } else { 10059 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10060 link_config, 10061 bp->link_params.speed_cap_mask[idx]); 10062 return; 10063 } 10064 break; 10065 10066 case PORT_FEATURE_LINK_SPEED_10G_CX4: 10067 if (bp->port.supported[idx] & 10068 SUPPORTED_10000baseT_Full) { 10069 bp->link_params.req_line_speed[idx] = 10070 SPEED_10000; 10071 bp->port.advertising[idx] |= 10072 (ADVERTISED_10000baseT_Full | 10073 ADVERTISED_FIBRE); 10074 } else { 10075 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10076 link_config, 10077 bp->link_params.speed_cap_mask[idx]); 10078 return; 10079 } 10080 break; 10081 case PORT_FEATURE_LINK_SPEED_20G: 10082 bp->link_params.req_line_speed[idx] = SPEED_20000; 10083 10084 break; 10085 default: 10086 BNX2X_ERR("NVRAM config error. 
BAD link speed link_config 0x%x\n", 10087 link_config); 10088 bp->link_params.req_line_speed[idx] = 10089 SPEED_AUTO_NEG; 10090 bp->port.advertising[idx] = 10091 bp->port.supported[idx]; 10092 break; 10093 } 10094 10095 bp->link_params.req_flow_ctrl[idx] = (link_config & 10096 PORT_FEATURE_FLOW_CONTROL_MASK); 10097 if ((bp->link_params.req_flow_ctrl[idx] == 10098 BNX2X_FLOW_CTRL_AUTO) && 10099 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { 10100 bp->link_params.req_flow_ctrl[idx] = 10101 BNX2X_FLOW_CTRL_NONE; 10102 } 10103 10104 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 10105 bp->link_params.req_line_speed[idx], 10106 bp->link_params.req_duplex[idx], 10107 bp->link_params.req_flow_ctrl[idx], 10108 bp->port.advertising[idx]); 10109 } 10110 } 10111 10112 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 10113 { 10114 mac_hi = cpu_to_be16(mac_hi); 10115 mac_lo = cpu_to_be32(mac_lo); 10116 memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); 10117 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); 10118 } 10119 10120 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 10121 { 10122 int port = BP_PORT(bp); 10123 u32 config; 10124 u32 ext_phy_type, ext_phy_config, eee_mode; 10125 10126 bp->link_params.bp = bp; 10127 bp->link_params.port = port; 10128 10129 bp->link_params.lane_config = 10130 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 10131 10132 bp->link_params.speed_cap_mask[0] = 10133 SHMEM_RD(bp, 10134 dev_info.port_hw_config[port].speed_capability_mask); 10135 bp->link_params.speed_cap_mask[1] = 10136 SHMEM_RD(bp, 10137 dev_info.port_hw_config[port].speed_capability_mask2); 10138 bp->port.link_config[0] = 10139 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 10140 10141 bp->port.link_config[1] = 10142 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); 10143 10144 bp->link_params.multi_phy_config = 10145 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 10146 /* If the device is capable of WoL, set the default state according 10147 * to the HW 10148 */ 10149 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 10150 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 10151 (config & PORT_FEATURE_WOL_ENABLED)); 10152 10153 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 10154 bp->link_params.lane_config, 10155 bp->link_params.speed_cap_mask[0], 10156 bp->port.link_config[0]); 10157 10158 bp->link_params.switch_cfg = (bp->port.link_config[0] & 10159 PORT_FEATURE_CONNECTED_SWITCH_MASK); 10160 bnx2x_phy_probe(&bp->link_params); 10161 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 10162 10163 bnx2x_link_settings_requested(bp); 10164 10165 /* 10166 * If connected directly, work with the internal PHY, otherwise, work 10167 * with the external PHY 10168 */ 10169 ext_phy_config = 10170 SHMEM_RD(bp, 10171 dev_info.port_hw_config[port].external_phy_config); 10172 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 10173 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 10174 bp->mdio.prtad = bp->port.phy_addr; 10175 10176 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 10177 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 10178 bp->mdio.prtad = 10179 XGXS_EXT_PHY_ADDR(ext_phy_config); 10180 10181 /* 10182 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) 10183 * In MF mode, it is set to cover self test cases 10184 */ 10185 if (IS_MF(bp)) 10186 bp->port.need_hw_lock = 
1; 10187 else 10188 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 10189 bp->common.shmem_base, 10190 bp->common.shmem2_base); 10191 10192 /* Configure link feature according to nvram value */ 10193 eee_mode = (((SHMEM_RD(bp, dev_info. 10194 port_feature_config[port].eee_power_mode)) & 10195 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 10196 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 10197 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 10198 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | 10199 EEE_MODE_ENABLE_LPI | 10200 EEE_MODE_OUTPUT_TIME; 10201 } else { 10202 bp->link_params.eee_mode = 0; 10203 } 10204 } 10205 10206 void bnx2x_get_iscsi_info(struct bnx2x *bp) 10207 { 10208 u32 no_flags = NO_ISCSI_FLAG; 10209 #ifdef BCM_CNIC 10210 int port = BP_PORT(bp); 10211 10212 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10213 drv_lic_key[port].max_iscsi_conn); 10214 10215 /* Get the number of maximum allowed iSCSI connections */ 10216 bp->cnic_eth_dev.max_iscsi_conn = 10217 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 10218 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; 10219 10220 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", 10221 bp->cnic_eth_dev.max_iscsi_conn); 10222 10223 /* 10224 * If maximum allowed number of connections is zero - 10225 * disable the feature. 10226 */ 10227 if (!bp->cnic_eth_dev.max_iscsi_conn) 10228 bp->flags |= no_flags; 10229 #else 10230 bp->flags |= no_flags; 10231 #endif 10232 } 10233 10234 #ifdef BCM_CNIC 10235 static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 10236 { 10237 /* Port info */ 10238 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10239 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); 10240 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10241 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); 10242 10243 /* Node info */ 10244 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10245 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); 10246 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10247 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 10248 } 10249 #endif 10250 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) 10251 { 10252 #ifdef BCM_CNIC 10253 int port = BP_PORT(bp); 10254 int func = BP_ABS_FUNC(bp); 10255 10256 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10257 drv_lic_key[port].max_fcoe_conn); 10258 10259 /* Get the number of maximum allowed FCoE connections */ 10260 bp->cnic_eth_dev.max_fcoe_conn = 10261 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 10262 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 10263 10264 /* Read the WWN: */ 10265 if (!IS_MF(bp)) { 10266 /* Port info */ 10267 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10268 SHMEM_RD(bp, 10269 dev_info.port_hw_config[port]. 10270 fcoe_wwn_port_name_upper); 10271 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10272 SHMEM_RD(bp, 10273 dev_info.port_hw_config[port]. 10274 fcoe_wwn_port_name_lower); 10275 10276 /* Node info */ 10277 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10278 SHMEM_RD(bp, 10279 dev_info.port_hw_config[port]. 10280 fcoe_wwn_node_name_upper); 10281 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10282 SHMEM_RD(bp, 10283 dev_info.port_hw_config[port]. 10284 fcoe_wwn_node_name_lower); 10285 } else if (!IS_MF_SD(bp)) { 10286 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 10287 10288 /* 10289 * Read the WWN info only if the FCoE feature is enabled for 10290 * this function. 
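		 * (in this non-SD branch the per-function feature flags live
		 * in the func_ext_config shmem block, which is why func_cfg
		 * is read and MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD is tested
		 * before touching the WWNs)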
10291 */ 10292 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) 10293 bnx2x_get_ext_wwn_info(bp, func); 10294 10295 } else if (IS_MF_FCOE_SD(bp)) 10296 bnx2x_get_ext_wwn_info(bp, func); 10297 10298 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); 10299 10300 /* 10301 * If maximum allowed number of connections is zero - 10302 * disable the feature. 10303 */ 10304 if (!bp->cnic_eth_dev.max_fcoe_conn) 10305 bp->flags |= NO_FCOE_FLAG; 10306 #else 10307 bp->flags |= NO_FCOE_FLAG; 10308 #endif 10309 } 10310 10311 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) 10312 { 10313 /* 10314 * iSCSI may be dynamically disabled, but reading the 10315 * info here lets the driver decrease its memory usage 10316 * if the feature is disabled for good 10317 */ 10318 bnx2x_get_iscsi_info(bp); 10319 bnx2x_get_fcoe_info(bp); 10320 } 10321 10322 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 10323 { 10324 u32 val, val2; 10325 int func = BP_ABS_FUNC(bp); 10326 int port = BP_PORT(bp); 10327 #ifdef BCM_CNIC 10328 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; 10329 u8 *fip_mac = bp->fip_mac; 10330 #endif 10331 10332 /* Zero primary MAC configuration */ 10333 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10334 10335 if (BP_NOMCP(bp)) { 10336 BNX2X_ERROR("warning: random MAC workaround active\n"); 10337 eth_hw_addr_random(bp->dev); 10338 } else if (IS_MF(bp)) { 10339 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 10340 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); 10341 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 10342 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) 10343 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 10344 10345 #ifdef BCM_CNIC 10346 /* 10347 * iSCSI and FCoE NPAR MACs: if either the iSCSI or the 10348 * FCoE MAC is missing then the appropriate feature should be disabled. 10349 * 10350 * In non-SD mode the features configuration comes from 10351 * struct func_ext_config. 10352 */ 10353 if (!IS_MF_SD(bp)) { 10354 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 10355 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 10356 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10357 iscsi_mac_addr_upper); 10358 val = MF_CFG_RD(bp, func_ext_config[func]. 10359 iscsi_mac_addr_lower); 10360 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10361 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10362 iscsi_mac); 10363 } else 10364 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 10365 10366 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 10367 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10368 fcoe_mac_addr_upper); 10369 val = MF_CFG_RD(bp, func_ext_config[func].
10370 fcoe_mac_addr_lower); 10371 bnx2x_set_mac_buf(fip_mac, val, val2); 10372 BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", 10373 fip_mac); 10374 10375 } else 10376 bp->flags |= NO_FCOE_FLAG; 10377 10378 bp->mf_ext_config = cfg; 10379 10380 } else { /* SD MODE */ 10381 if (IS_MF_STORAGE_SD(bp)) { 10382 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10383 /* use primary mac as iscsi mac */ 10384 memcpy(iscsi_mac, bp->dev->dev_addr, 10385 ETH_ALEN); 10386 10387 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 10388 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10389 iscsi_mac); 10390 } else { /* FCoE */ 10391 memcpy(fip_mac, bp->dev->dev_addr, 10392 ETH_ALEN); 10393 BNX2X_DEV_INFO("SD FCoE MODE\n"); 10394 BNX2X_DEV_INFO("Read FIP MAC: %pM\n", 10395 fip_mac); 10396 } 10397 /* Zero primary MAC configuration */ 10398 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10399 } 10400 } 10401 10402 if (IS_MF_FCOE_AFEX(bp)) 10403 /* use FIP MAC as primary MAC */ 10404 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 10405 10406 #endif 10407 } else { 10408 /* in SF read MACs from port configuration */ 10409 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 10410 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 10411 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 10412 10413 #ifdef BCM_CNIC 10414 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10415 iscsi_mac_upper); 10416 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10417 iscsi_mac_lower); 10418 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10419 10420 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10421 fcoe_fip_mac_upper); 10422 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10423 fcoe_fip_mac_lower); 10424 bnx2x_set_mac_buf(fip_mac, val, val2); 10425 #endif 10426 } 10427 10428 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 10429 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 10430 10431 #ifdef BCM_CNIC 10432 /* Disable iSCSI if MAC configuration is 10433 * invalid. 10434 */ 10435 if (!is_valid_ether_addr(iscsi_mac)) { 10436 bp->flags |= NO_ISCSI_FLAG; 10437 memset(iscsi_mac, 0, ETH_ALEN); 10438 } 10439 10440 /* Disable FCoE if MAC configuration is 10441 * invalid. 
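	 * (is_valid_ether_addr() rejects all-zero and multicast addresses,
	 * so an unprovisioned shmem entry cannot leak into the FCoE setup)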
10442 */ 10443 if (!is_valid_ether_addr(fip_mac)) { 10444 bp->flags |= NO_FCOE_FLAG; 10445 memset(bp->fip_mac, 0, ETH_ALEN); 10446 } 10447 #endif 10448 10449 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) 10450 dev_err(&bp->pdev->dev, 10451 "bad Ethernet MAC address configuration: %pM\n" 10452 "change it manually before bringing up the appropriate network interface\n", 10453 bp->dev->dev_addr); 10454 10455 10456 } 10457 10458 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 10459 { 10460 int /*abs*/func = BP_ABS_FUNC(bp); 10461 int vn; 10462 u32 val = 0; 10463 int rc = 0; 10464 10465 bnx2x_get_common_hwinfo(bp); 10466 10467 /* 10468 * initialize IGU parameters 10469 */ 10470 if (CHIP_IS_E1x(bp)) { 10471 bp->common.int_block = INT_BLOCK_HC; 10472 10473 bp->igu_dsb_id = DEF_SB_IGU_ID; 10474 bp->igu_base_sb = 0; 10475 } else { 10476 bp->common.int_block = INT_BLOCK_IGU; 10477 10478 /* do not allow device reset during IGU info processing */ 10479 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 10480 10481 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 10482 10483 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 10484 int tout = 5000; 10485 10486 BNX2X_DEV_INFO("FORCING Normal Mode\n"); 10487 10488 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 10489 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); 10490 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); 10491 10492 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 10493 tout--; 10494 usleep_range(1000, 1000); 10495 } 10496 10497 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 10498 dev_err(&bp->pdev->dev, 10499 "FORCING Normal Mode failed!!!\n"); 10500 return -EPERM; 10501 } 10502 } 10503 10504 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 10505 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); 10506 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; 10507 } else 10508 BNX2X_DEV_INFO("IGU Normal Mode\n"); 10509 10510 bnx2x_get_igu_cam_info(bp); 10511 10512 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 10513 } 10514 10515 /* 10516 * set base FW non-default (fast path) status block id, this value is 10517 * used to initialize the fw_sb_id saved on the fp/queue structure to 10518 * determine the id used by the FW. 10519 */ 10520 if (CHIP_IS_E1x(bp)) 10521 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); 10522 else /* 10523 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of 10524 * the same queue are indicated on the same IGU SB). So we prefer 10525 * FW and IGU SBs to be the same value. 10526 */ 10527 bp->base_fw_ndsb = bp->igu_base_sb; 10528 10529 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" 10530 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, 10531 bp->igu_sb_cnt, bp->base_fw_ndsb); 10532 10533 /* 10534 * Initialize MF configuration 10535 */ 10536 10537 bp->mf_ov = 0; 10538 bp->mf_mode = 0; 10539 vn = BP_VN(bp); 10540 10541 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 10542 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 10543 bp->common.shmem2_base, SHMEM2_RD(bp, size), 10544 (u32)offsetof(struct shmem2_region, mf_cfg_addr)); 10545 10546 if (SHMEM2_HAS(bp, mf_cfg_addr)) 10547 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 10548 else 10549 bp->common.mf_cfg_base = bp->common.shmem_base + 10550 offsetof(struct shmem_region, func_mb) + 10551 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 10552 /* 10553 * get mf configuration: 10554 * 1. existence of MF configuration 10555 * 2.
MAC address must be legal (check only upper bytes) 10556 * for Switch-Independent mode; 10557 * OVLAN must be legal for Switch-Dependent mode 10558 * 3. SF_MODE configures specific MF mode 10559 */ 10560 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 10561 /* get mf configuration */ 10562 val = SHMEM_RD(bp, 10563 dev_info.shared_feature_config.config); 10564 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; 10565 10566 switch (val) { 10567 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 10568 val = MF_CFG_RD(bp, func_mf_config[func]. 10569 mac_upper); 10570 /* check for legal mac (upper bytes)*/ 10571 if (val != 0xffff) { 10572 bp->mf_mode = MULTI_FUNCTION_SI; 10573 bp->mf_config[vn] = MF_CFG_RD(bp, 10574 func_mf_config[func].config); 10575 } else 10576 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 10577 break; 10578 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 10579 if ((!CHIP_IS_E1x(bp)) && 10580 (MF_CFG_RD(bp, func_mf_config[func]. 10581 mac_upper) != 0xffff) && 10582 (SHMEM2_HAS(bp, 10583 afex_driver_support))) { 10584 bp->mf_mode = MULTI_FUNCTION_AFEX; 10585 bp->mf_config[vn] = MF_CFG_RD(bp, 10586 func_mf_config[func].config); 10587 } else { 10588 BNX2X_DEV_INFO("can not configure afex mode\n"); 10589 } 10590 break; 10591 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 10592 /* get OV configuration */ 10593 val = MF_CFG_RD(bp, 10594 func_mf_config[FUNC_0].e1hov_tag); 10595 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 10596 10597 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 10598 bp->mf_mode = MULTI_FUNCTION_SD; 10599 bp->mf_config[vn] = MF_CFG_RD(bp, 10600 func_mf_config[func].config); 10601 } else 10602 BNX2X_DEV_INFO("illegal OV for SD\n"); 10603 break; 10604 default: 10605 /* Unknown configuration: reset mf_config */ 10606 bp->mf_config[vn] = 0; 10607 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); 10608 } 10609 } 10610 10611 BNX2X_DEV_INFO("%s function mode\n", 10612 IS_MF(bp) ? 
"multi" : "single"); 10613 10614 switch (bp->mf_mode) { 10615 case MULTI_FUNCTION_SD: 10616 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 10617 FUNC_MF_CFG_E1HOV_TAG_MASK; 10618 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 10619 bp->mf_ov = val; 10620 bp->path_has_ovlan = true; 10621 10622 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", 10623 func, bp->mf_ov, bp->mf_ov); 10624 } else { 10625 dev_err(&bp->pdev->dev, 10626 "No valid MF OV for func %d, aborting\n", 10627 func); 10628 return -EPERM; 10629 } 10630 break; 10631 case MULTI_FUNCTION_AFEX: 10632 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); 10633 break; 10634 case MULTI_FUNCTION_SI: 10635 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 10636 func); 10637 break; 10638 default: 10639 if (vn) { 10640 dev_err(&bp->pdev->dev, 10641 "VN %d is in a single function mode, aborting\n", 10642 vn); 10643 return -EPERM; 10644 } 10645 break; 10646 } 10647 10648 /* check if other port on the path needs ovlan: 10649 * Since MF configuration is shared between ports 10650 * Possible mixed modes are only 10651 * {SF, SI} {SF, SD} {SD, SF} {SI, SF} 10652 */ 10653 if (CHIP_MODE_IS_4_PORT(bp) && 10654 !bp->path_has_ovlan && 10655 !IS_MF(bp) && 10656 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 10657 u8 other_port = !BP_PORT(bp); 10658 u8 other_func = BP_PATH(bp) + 2*other_port; 10659 val = MF_CFG_RD(bp, 10660 func_mf_config[other_func].e1hov_tag); 10661 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 10662 bp->path_has_ovlan = true; 10663 } 10664 } 10665 10666 /* adjust igu_sb_cnt to MF for E1x */ 10667 if (CHIP_IS_E1x(bp) && IS_MF(bp)) 10668 bp->igu_sb_cnt /= E1HVN_MAX; 10669 10670 /* port info */ 10671 bnx2x_get_port_hwinfo(bp); 10672 10673 /* Get MAC addresses */ 10674 bnx2x_get_mac_hwinfo(bp); 10675 10676 bnx2x_get_cnic_info(bp); 10677 10678 return rc; 10679 } 10680 10681 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) 10682 { 10683 int cnt, i, block_end, rodi; 10684 char vpd_start[BNX2X_VPD_LEN+1]; 10685 char str_id_reg[VENDOR_ID_LEN+1]; 10686 char str_id_cap[VENDOR_ID_LEN+1]; 10687 char *vpd_data; 10688 char *vpd_extended_data = NULL; 10689 u8 len; 10690 10691 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); 10692 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); 10693 10694 if (cnt < BNX2X_VPD_LEN) 10695 goto out_not_found; 10696 10697 /* VPD RO tag should be first tag after identifier string, hence 10698 * we should be able to find it in first BNX2X_VPD_LEN chars 10699 */ 10700 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, 10701 PCI_VPD_LRDT_RO_DATA); 10702 if (i < 0) 10703 goto out_not_found; 10704 10705 block_end = i + PCI_VPD_LRDT_TAG_SIZE + 10706 pci_vpd_lrdt_size(&vpd_start[i]); 10707 10708 i += PCI_VPD_LRDT_TAG_SIZE; 10709 10710 if (block_end > BNX2X_VPD_LEN) { 10711 vpd_extended_data = kmalloc(block_end, GFP_KERNEL); 10712 if (vpd_extended_data == NULL) 10713 goto out_not_found; 10714 10715 /* read rest of vpd image into vpd_extended_data */ 10716 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); 10717 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, 10718 block_end - BNX2X_VPD_LEN, 10719 vpd_extended_data + BNX2X_VPD_LEN); 10720 if (cnt < (block_end - BNX2X_VPD_LEN)) 10721 goto out_not_found; 10722 vpd_data = vpd_extended_data; 10723 } else 10724 vpd_data = vpd_start; 10725 10726 /* now vpd_data holds full vpd content in both cases */ 10727 10728 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 10729 PCI_VPD_RO_KEYWORD_MFR_ID); 10730 if (rodi < 0) 10731 goto out_not_found; 10732 
10733 len = pci_vpd_info_field_size(&vpd_data[rodi]); 10734 10735 if (len != VENDOR_ID_LEN) 10736 goto out_not_found; 10737 10738 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 10739 10740 /* vendor specific info */ 10741 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); 10742 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); 10743 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || 10744 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { 10745 10746 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 10747 PCI_VPD_RO_KEYWORD_VENDOR0); 10748 if (rodi >= 0) { 10749 len = pci_vpd_info_field_size(&vpd_data[rodi]); 10750 10751 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 10752 10753 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { 10754 memcpy(bp->fw_ver, &vpd_data[rodi], len); 10755 bp->fw_ver[len] = ' '; 10756 } 10757 } 10758 kfree(vpd_extended_data); 10759 return; 10760 } 10761 out_not_found: 10762 kfree(vpd_extended_data); 10763 return; 10764 } 10765 10766 static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) 10767 { 10768 u32 flags = 0; 10769 10770 if (CHIP_REV_IS_FPGA(bp)) 10771 SET_FLAGS(flags, MODE_FPGA); 10772 else if (CHIP_REV_IS_EMUL(bp)) 10773 SET_FLAGS(flags, MODE_EMUL); 10774 else 10775 SET_FLAGS(flags, MODE_ASIC); 10776 10777 if (CHIP_MODE_IS_4_PORT(bp)) 10778 SET_FLAGS(flags, MODE_PORT4); 10779 else 10780 SET_FLAGS(flags, MODE_PORT2); 10781 10782 if (CHIP_IS_E2(bp)) 10783 SET_FLAGS(flags, MODE_E2); 10784 else if (CHIP_IS_E3(bp)) { 10785 SET_FLAGS(flags, MODE_E3); 10786 if (CHIP_REV(bp) == CHIP_REV_Ax) 10787 SET_FLAGS(flags, MODE_E3_A0); 10788 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 10789 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 10790 } 10791 10792 if (IS_MF(bp)) { 10793 SET_FLAGS(flags, MODE_MF); 10794 switch (bp->mf_mode) { 10795 case MULTI_FUNCTION_SD: 10796 SET_FLAGS(flags, MODE_MF_SD); 10797 break; 10798 case MULTI_FUNCTION_SI: 10799 SET_FLAGS(flags, MODE_MF_SI); 10800 break; 10801 case MULTI_FUNCTION_AFEX: 10802 SET_FLAGS(flags, MODE_MF_AFEX); 10803 break; 10804 } 10805 } else 10806 SET_FLAGS(flags, MODE_SF); 10807 10808 #if defined(__LITTLE_ENDIAN) 10809 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 10810 #else /*(__BIG_ENDIAN)*/ 10811 SET_FLAGS(flags, MODE_BIG_ENDIAN); 10812 #endif 10813 INIT_MODE_FLAGS(bp) = flags; 10814 } 10815 10816 static int __devinit bnx2x_init_bp(struct bnx2x *bp) 10817 { 10818 int func; 10819 int rc; 10820 10821 mutex_init(&bp->port.phy_mutex); 10822 mutex_init(&bp->fw_mb_mutex); 10823 spin_lock_init(&bp->stats_lock); 10824 #ifdef BCM_CNIC 10825 mutex_init(&bp->cnic_mutex); 10826 #endif 10827 10828 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 10829 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 10830 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 10831 rc = bnx2x_get_hwinfo(bp); 10832 if (rc) 10833 return rc; 10834 10835 bnx2x_set_modes_bitmap(bp); 10836 10837 rc = bnx2x_alloc_mem_bp(bp); 10838 if (rc) 10839 return rc; 10840 10841 bnx2x_read_fwinfo(bp); 10842 10843 func = BP_FUNC(bp); 10844 10845 /* need to reset chip if undi was active */ 10846 if (!BP_NOMCP(bp)) { 10847 /* init fw_seq */ 10848 bp->fw_seq = 10849 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 10850 DRV_MSG_SEQ_NUMBER_MASK; 10851 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 10852 10853 bnx2x_prev_unload(bp); 10854 } 10855 10856 10857 if (CHIP_REV_IS_FPGA(bp)) 10858 dev_err(&bp->pdev->dev, "FPGA detected\n"); 10859 10860 if (BP_NOMCP(bp) && (func == 0)) 10861 dev_err(&bp->pdev->dev, "MCP disabled, must load 
devices in order!\n"); 10862 10863 bp->disable_tpa = disable_tpa; 10864 10865 #ifdef BCM_CNIC 10866 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 10867 #endif 10868 10869 /* Set TPA flags */ 10870 if (bp->disable_tpa) { 10871 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 10872 bp->dev->features &= ~NETIF_F_LRO; 10873 } else { 10874 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 10875 bp->dev->features |= NETIF_F_LRO; 10876 } 10877 10878 if (CHIP_IS_E1(bp)) 10879 bp->dropless_fc = 0; 10880 else 10881 bp->dropless_fc = dropless_fc; 10882 10883 bp->mrrs = mrrs; 10884 10885 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; 10886 10887 /* make sure that the numbers are in the right granularity */ 10888 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 10889 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 10890 10891 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; 10892 10893 init_timer(&bp->timer); 10894 bp->timer.expires = jiffies + bp->current_interval; 10895 bp->timer.data = (unsigned long) bp; 10896 bp->timer.function = bnx2x_timer; 10897 10898 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); 10899 bnx2x_dcbx_init_params(bp); 10900 10901 #ifdef BCM_CNIC 10902 if (CHIP_IS_E1x(bp)) 10903 bp->cnic_base_cl_id = FP_SB_MAX_E1x; 10904 else 10905 bp->cnic_base_cl_id = FP_SB_MAX_E2; 10906 #endif 10907 10908 /* multiple tx priority */ 10909 if (CHIP_IS_E1x(bp)) 10910 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; 10911 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) 10912 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; 10913 if (CHIP_IS_E3B0(bp)) 10914 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 10915 10916 return rc; 10917 } 10918 10919 10920 /**************************************************************************** 10921 * General service functions 10922 ****************************************************************************/ 10923 10924 /* 10925 * net_device service functions 10926 */ 10927 10928 /* called with rtnl_lock */ 10929 static int bnx2x_open(struct net_device *dev) 10930 { 10931 struct bnx2x *bp = netdev_priv(dev); 10932 bool global = false; 10933 int other_engine = BP_PATH(bp) ? 0 : 1; 10934 bool other_load_status, load_status; 10935 10936 bp->stats_init = true; 10937 10938 netif_carrier_off(dev); 10939 10940 bnx2x_set_power_state(bp, PCI_D0); 10941 10942 other_load_status = bnx2x_get_load_status(bp, other_engine); 10943 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 10944 10945 /* 10946 * If a parity error happened during the unload, then attentions 10947 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we 10948 * want the first function loaded on the current engine to 10949 * complete the recovery. 10950 */ 10951 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 10952 bnx2x_chk_parity_attn(bp, &global, true)) 10953 do { 10954 /* 10955 * If there are attentions and they are in global 10956 * blocks, set the GLOBAL_RESET bit regardless of whether 10957 * it will be this function that will complete the 10958 * recovery or not. 10959 */ 10960 if (global) 10961 bnx2x_set_reset_global(bp); 10962 10963 /* 10964 * Only the first function on the current engine should 10965 * try to recover in open. In case of attentions in 10966 * global blocks only the first in the chip should try 10967 * to recover. 10968 */ 10969 if ((!load_status && 10970 (!global || !other_load_status)) && 10971 bnx2x_trylock_leader_lock(bp) && 10972 !bnx2x_leader_reset(bp)) { 10973 netdev_info(bp->dev, "Recovered in open\n"); 10974 break; 10975 } 10976 10977 /* recovery has failed...
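			 * power the device down and record
			 * BNX2X_RECOVERY_FAILED so a later open attempt knows
			 * recovery is still pending; only a retry or a power
			 * cycle can help from here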
*/ 10978 bnx2x_set_power_state(bp, PCI_D3hot); 10979 bp->recovery_state = BNX2X_RECOVERY_FAILED; 10980 10981 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" 10982 "If you still see this message after a few retries then power cycle is required.\n"); 10983 10984 return -EAGAIN; 10985 } while (0); 10986 10987 bp->recovery_state = BNX2X_RECOVERY_DONE; 10988 return bnx2x_nic_load(bp, LOAD_OPEN); 10989 } 10990 10991 /* called with rtnl_lock */ 10992 static int bnx2x_close(struct net_device *dev) 10993 { 10994 struct bnx2x *bp = netdev_priv(dev); 10995 10996 /* Unload the driver, release IRQs */ 10997 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 10998 10999 /* Power off */ 11000 bnx2x_set_power_state(bp, PCI_D3hot); 11001 11002 return 0; 11003 } 11004 11005 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 11006 struct bnx2x_mcast_ramrod_params *p) 11007 { 11008 int mc_count = netdev_mc_count(bp->dev); 11009 struct bnx2x_mcast_list_elem *mc_mac = 11010 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); 11011 struct netdev_hw_addr *ha; 11012 11013 if (!mc_mac) 11014 return -ENOMEM; 11015 11016 INIT_LIST_HEAD(&p->mcast_list); 11017 11018 netdev_for_each_mc_addr(ha, bp->dev) { 11019 mc_mac->mac = bnx2x_mc_addr(ha); 11020 list_add_tail(&mc_mac->link, &p->mcast_list); 11021 mc_mac++; 11022 } 11023 11024 p->mcast_list_len = mc_count; 11025 11026 return 0; 11027 } 11028 11029 static void bnx2x_free_mcast_macs_list( 11030 struct bnx2x_mcast_ramrod_params *p) 11031 { 11032 struct bnx2x_mcast_list_elem *mc_mac = 11033 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, 11034 link); 11035 11036 WARN_ON(!mc_mac); 11037 kfree(mc_mac); 11038 } 11039 11040 /** 11041 * bnx2x_set_uc_list - configure a new unicast MACs list. 11042 * 11043 * @bp: driver handle 11044 * 11045 * We will use zero (0) as a MAC type for these MACs. 
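 * The list is rebuilt from scratch: all old BNX2X_UC_LIST_MAC entries
 * are scheduled for deletion first, each address in dev->uc is then
 * queued as an ADD, and the batch is executed with RAMROD_CONT.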
11046 */ 11047 static int bnx2x_set_uc_list(struct bnx2x *bp) 11048 { 11049 int rc; 11050 struct net_device *dev = bp->dev; 11051 struct netdev_hw_addr *ha; 11052 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; 11053 unsigned long ramrod_flags = 0; 11054 11055 /* First schedule a cleanup of the old configuration */ 11056 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); 11057 if (rc < 0) { 11058 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); 11059 return rc; 11060 } 11061 11062 netdev_for_each_uc_addr(ha, dev) { 11063 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, 11064 BNX2X_UC_LIST_MAC, &ramrod_flags); 11065 if (rc < 0) { 11066 BNX2X_ERR("Failed to schedule ADD operations: %d\n", 11067 rc); 11068 return rc; 11069 } 11070 } 11071 11072 /* Execute the pending commands */ 11073 __set_bit(RAMROD_CONT, &ramrod_flags); 11074 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, 11075 BNX2X_UC_LIST_MAC, &ramrod_flags); 11076 } 11077 11078 static int bnx2x_set_mc_list(struct bnx2x *bp) 11079 { 11080 struct net_device *dev = bp->dev; 11081 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 11082 int rc = 0; 11083 11084 rparam.mcast_obj = &bp->mcast_obj; 11085 11086 /* first, clear all configured multicast MACs */ 11087 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 11088 if (rc < 0) { 11089 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); 11090 return rc; 11091 } 11092 11093 /* then, configure the new MAC list */ 11094 if (netdev_mc_count(dev)) { 11095 rc = bnx2x_init_mcast_macs_list(bp, &rparam); 11096 if (rc) { 11097 BNX2X_ERR("Failed to create multicast MACs list: %d\n", 11098 rc); 11099 return rc; 11100 } 11101 11102 /* Now add the new MACs */ 11103 rc = bnx2x_config_mcast(bp, &rparam, 11104 BNX2X_MCAST_CMD_ADD); 11105 if (rc < 0) 11106 BNX2X_ERR("Failed to set a new multicast configuration: %d\n", 11107 rc); 11108 11109 bnx2x_free_mcast_macs_list(&rparam); 11110 } 11111 11112 return rc; 11113 } 11114 11115 11116 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ 11117 void bnx2x_set_rx_mode(struct net_device *dev) 11118 { 11119 struct bnx2x *bp = netdev_priv(dev); 11120 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 11121 11122 if (bp->state != BNX2X_STATE_OPEN) { 11123 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 11124 return; 11125 } 11126 11127 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); 11128 11129 if (dev->flags & IFF_PROMISC) 11130 rx_mode = BNX2X_RX_MODE_PROMISC; 11131 else if ((dev->flags & IFF_ALLMULTI) || 11132 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && 11133 CHIP_IS_E1(bp))) 11134 rx_mode = BNX2X_RX_MODE_ALLMULTI; 11135 else { 11136 /* some multicasts */ 11137 if (bnx2x_set_mc_list(bp) < 0) 11138 rx_mode = BNX2X_RX_MODE_ALLMULTI; 11139 11140 if (bnx2x_set_uc_list(bp) < 0) 11141 rx_mode = BNX2X_RX_MODE_PROMISC; 11142 } 11143 11144 bp->rx_mode = rx_mode; 11145 #ifdef BCM_CNIC 11146 /* handle ISCSI SD mode */ 11147 if (IS_MF_ISCSI_SD(bp)) 11148 bp->rx_mode = BNX2X_RX_MODE_NONE; 11149 #endif 11150 11151 /* Schedule the rx_mode command */ 11152 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 11153 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 11154 return; 11155 } 11156 11157 bnx2x_set_storm_rx_mode(bp); 11158 } 11159 11160 /* called with rtnl_lock */ 11161 static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 11162 int devad, u16 addr) 11163 { 11164 struct bnx2x *bp = netdev_priv(netdev); 11165 u16 value; 11166 int rc; 11167
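	/* This is the mdio_if_info read hook used by mdio_mii_ioctl() (see
	 * bnx2x_ioctl() below); the access goes through the common PHY
	 * layer while holding the PHY lock */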
11168 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 11169 prtad, devad, addr); 11170 11171 /* The HW expects different devad if CL22 is used */ 11172 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 11173 11174 bnx2x_acquire_phy_lock(bp); 11175 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); 11176 bnx2x_release_phy_lock(bp); 11177 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 11178 11179 if (!rc) 11180 rc = value; 11181 return rc; 11182 } 11183 11184 /* called with rtnl_lock */ 11185 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, 11186 u16 addr, u16 value) 11187 { 11188 struct bnx2x *bp = netdev_priv(netdev); 11189 int rc; 11190 11191 DP(NETIF_MSG_LINK, 11192 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", 11193 prtad, devad, addr, value); 11194 11195 /* The HW expects different devad if CL22 is used */ 11196 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 11197 11198 bnx2x_acquire_phy_lock(bp); 11199 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); 11200 bnx2x_release_phy_lock(bp); 11201 return rc; 11202 } 11203 11204 /* called with rtnl_lock */ 11205 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 11206 { 11207 struct bnx2x *bp = netdev_priv(dev); 11208 struct mii_ioctl_data *mdio = if_mii(ifr); 11209 11210 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", 11211 mdio->phy_id, mdio->reg_num, mdio->val_in); 11212 11213 if (!netif_running(dev)) 11214 return -EAGAIN; 11215 11216 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 11217 } 11218 11219 #ifdef CONFIG_NET_POLL_CONTROLLER 11220 static void poll_bnx2x(struct net_device *dev) 11221 { 11222 struct bnx2x *bp = netdev_priv(dev); 11223 11224 disable_irq(bp->pdev->irq); 11225 bnx2x_interrupt(bp->pdev->irq, dev); 11226 enable_irq(bp->pdev->irq); 11227 } 11228 #endif 11229 11230 static int bnx2x_validate_addr(struct net_device *dev) 11231 { 11232 struct bnx2x *bp = netdev_priv(dev); 11233 11234 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { 11235 BNX2X_ERR("Non-valid Ethernet address\n"); 11236 return -EADDRNOTAVAIL; 11237 } 11238 return 0; 11239 } 11240 11241 static const struct net_device_ops bnx2x_netdev_ops = { 11242 .ndo_open = bnx2x_open, 11243 .ndo_stop = bnx2x_close, 11244 .ndo_start_xmit = bnx2x_start_xmit, 11245 .ndo_select_queue = bnx2x_select_queue, 11246 .ndo_set_rx_mode = bnx2x_set_rx_mode, 11247 .ndo_set_mac_address = bnx2x_change_mac_addr, 11248 .ndo_validate_addr = bnx2x_validate_addr, 11249 .ndo_do_ioctl = bnx2x_ioctl, 11250 .ndo_change_mtu = bnx2x_change_mtu, 11251 .ndo_fix_features = bnx2x_fix_features, 11252 .ndo_set_features = bnx2x_set_features, 11253 .ndo_tx_timeout = bnx2x_tx_timeout, 11254 #ifdef CONFIG_NET_POLL_CONTROLLER 11255 .ndo_poll_controller = poll_bnx2x, 11256 #endif 11257 .ndo_setup_tc = bnx2x_setup_tc, 11258 11259 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 11260 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 11261 #endif 11262 }; 11263 11264 static int bnx2x_set_coherency_mask(struct bnx2x *bp) 11265 { 11266 struct device *dev = &bp->pdev->dev; 11267 11268 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 11269 bp->flags |= USING_DAC_FLAG; 11270 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 11271 dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); 11272 return -EIO; 11273 } 11274 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 11275 dev_err(dev, "System does not support DMA, 
aborting\n"); 11276 return -EIO; 11277 } 11278 11279 return 0; 11280 } 11281 11282 static int __devinit bnx2x_init_dev(struct pci_dev *pdev, 11283 struct net_device *dev, 11284 unsigned long board_type) 11285 { 11286 struct bnx2x *bp; 11287 int rc; 11288 u32 pci_cfg_dword; 11289 bool chip_is_e1x = (board_type == BCM57710 || 11290 board_type == BCM57711 || 11291 board_type == BCM57711E); 11292 11293 SET_NETDEV_DEV(dev, &pdev->dev); 11294 bp = netdev_priv(dev); 11295 11296 bp->dev = dev; 11297 bp->pdev = pdev; 11298 bp->flags = 0; 11299 11300 rc = pci_enable_device(pdev); 11301 if (rc) { 11302 dev_err(&bp->pdev->dev, 11303 "Cannot enable PCI device, aborting\n"); 11304 goto err_out; 11305 } 11306 11307 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 11308 dev_err(&bp->pdev->dev, 11309 "Cannot find PCI device base address, aborting\n"); 11310 rc = -ENODEV; 11311 goto err_out_disable; 11312 } 11313 11314 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 11315 dev_err(&bp->pdev->dev, "Cannot find second PCI device" 11316 " base address, aborting\n"); 11317 rc = -ENODEV; 11318 goto err_out_disable; 11319 } 11320 11321 if (atomic_read(&pdev->enable_cnt) == 1) { 11322 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 11323 if (rc) { 11324 dev_err(&bp->pdev->dev, 11325 "Cannot obtain PCI resources, aborting\n"); 11326 goto err_out_disable; 11327 } 11328 11329 pci_set_master(pdev); 11330 pci_save_state(pdev); 11331 } 11332 11333 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 11334 if (bp->pm_cap == 0) { 11335 dev_err(&bp->pdev->dev, 11336 "Cannot find power management capability, aborting\n"); 11337 rc = -EIO; 11338 goto err_out_release; 11339 } 11340 11341 if (!pci_is_pcie(pdev)) { 11342 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); 11343 rc = -EIO; 11344 goto err_out_release; 11345 } 11346 11347 rc = bnx2x_set_coherency_mask(bp); 11348 if (rc) 11349 goto err_out_release; 11350 11351 dev->mem_start = pci_resource_start(pdev, 0); 11352 dev->base_addr = dev->mem_start; 11353 dev->mem_end = pci_resource_end(pdev, 0); 11354 11355 dev->irq = pdev->irq; 11356 11357 bp->regview = pci_ioremap_bar(pdev, 0); 11358 if (!bp->regview) { 11359 dev_err(&bp->pdev->dev, 11360 "Cannot map register space, aborting\n"); 11361 rc = -ENOMEM; 11362 goto err_out_release; 11363 } 11364 11365 /* In E1/E1H use pci device function given by kernel. 11366 * In E2/E3 read physical function from ME register since these chips 11367 * support Physical Device Assignment where kernel BDF maybe arbitrary 11368 * (depending on hypervisor). 11369 */ 11370 if (chip_is_e1x) 11371 bp->pf_num = PCI_FUNC(pdev->devfn); 11372 else {/* chip is E2/3*/ 11373 pci_read_config_dword(bp->pdev, 11374 PCICFG_ME_REGISTER, &pci_cfg_dword); 11375 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> 11376 ME_REG_ABS_PF_NUM_SHIFT); 11377 } 11378 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); 11379 11380 bnx2x_set_power_state(bp, PCI_D0); 11381 11382 /* clean indirect addresses */ 11383 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 11384 PCICFG_VENDOR_ID_OFFSET); 11385 /* 11386 * Clean the following indirect addresses for all functions since it 11387 * is not used by the driver. 
	/* In E1/E1H use pci device function given by kernel.
	 * In E2/E3 read physical function from ME register since these chips
	 * support Physical Device Assignment where kernel BDF may be
	 * arbitrary (depending on hypervisor).
	 */
	if (chip_is_e1x)
		bp->pf_num = PCI_FUNC(pdev->devfn);
	else {	/* chip is E2/3 */
		pci_read_config_dword(bp->pdev,
				      PCICFG_ME_REGISTER, &pci_cfg_dword);
		bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
				  ME_REG_ABS_PF_NUM_SHIFT);
	}
	BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	/*
	 * Clean the following indirect addresses for all functions since
	 * they are not used by the driver.
	 */
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);

	if (chip_is_e1x) {
		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
	}

	/*
	 * Enable internal target-read (in case we are probed after PF FLR).
	 * Must be done prior to any BAR read access. Only for 57712 and up.
	 */
	if (!chip_is_e1x)
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Reset the load counter */
	bnx2x_clear_load_status(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
		NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
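/*
 * Firmware file layout, as validated by bnx2x_check_firmware() below: the
 * blob starts with a struct bnx2x_fw_file_hdr, which is in effect an array
 * of big-endian {offset, len} section descriptors.  Every section must lie
 * entirely inside the blob, every init_ops_offsets entry must index into
 * the init_ops array, and the four fw_version bytes must match the version
 * this driver was built against.
 */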
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
		BNX2X_ERR("Wrong FW size\n");
		return -EINVAL;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			BNX2X_ERR("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			BNX2X_ERR("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			  fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			  BCM_5710_FW_MAJOR_VERSION,
			  BCM_5710_FW_MINOR_VERSION,
			  BCM_5710_FW_REVISION_VERSION,
			  BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/*
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) \
		goto lbl; \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
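/*
 * Usage note: BNX2X_ALLOC_AND_SET() is deliberately non-hygienic -- it
 * expects 'bp' and 'fw_hdr' in the caller's scope and jumps to the given
 * unwind label on allocation failure, e.g.:
 *
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 *
 * which allocates bp->init_data and fills it from the firmware blob via
 * be32_to_cpu_n().
 */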
static int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (bp->firmware)
		return 0;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (!CHIP_IS_E1x(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}
	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n",
			  fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);
	bp->firmware = NULL;

	return rc;
}

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
	bp->firmware = NULL;
}
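/*
 * init_fw/release_fw pairing: bnx2x_init_firmware() is idempotent (it
 * returns early when bp->firmware is already set) and unwinds partial
 * allocations through the labels above; bnx2x_release_firmware() may be
 * called on a never-initialized bp since kfree(NULL) and
 * release_firmware(NULL) are both no-ops.
 */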
static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn      = bnx2x_init_hw_common,
	.init_hw_port     = bnx2x_init_hw_port,
	.init_hw_func     = bnx2x_init_hw_func,

	.reset_hw_cmn  = bnx2x_reset_common,
	.reset_hw_port = bnx2x_reset_port,
	.reset_hw_func = bnx2x_reset_func,

	.gunzip_init = bnx2x_gunzip_init,
	.gunzip_end  = bnx2x_gunzip_end,

	.init_fw    = bnx2x_init_firmware,
	.release_fw = bnx2x_release_firmware,
};

void bnx2x__init_func_obj(struct bnx2x *bp)
{
	/* Prepare DMAE related driver resources */
	bnx2x_setup_dmae(bp);

	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    bnx2x_sp(bp, func_afex_rdata),
			    bnx2x_sp_mapping(bp, func_afex_rdata),
			    &bnx2x_func_sp_drv);
}

/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
	int cid_count = BNX2X_L2_MAX_CID(bp);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev:	pci device
 *
 */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

	/*
	 * If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */
	if (!pos)
		return 1 + CNIC_PRESENT;

	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all SBs
	 * without the default SB.
	 */
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
	return control & PCI_MSIX_FLAGS_QSIZE;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, max_non_def_sbs;
	int rx_count, tx_count, rss_count, doorbell_size;
	/*
	 * An estimated maximum supported CoS number according to the chip
	 * version.
	 * We will try to roughly estimate the maximum number of CoSes this
	 * chip may support in order to minimize the memory allocated for Tx
	 * netdev_queue's. This number will be accurately calculated during
	 * the initialization of bp->max_cos based on the chip version AND
	 * chip revision in bnx2x_init_bp().
	 */
	u8 max_cos_est = 0;

	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		max_cos_est = BNX2X_MULTI_TX_COS_E1X;
		break;

	case BCM57712:
	case BCM57712_MF:
		max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
		break;

	case BCM57800:
	case BCM57800_MF:
	case BCM57810:
	case BCM57810_MF:
	case BCM57840:
	case BCM57840_MF:
	case BCM57811:
	case BCM57811_MF:
		max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
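	/*
	 * Illustrative arithmetic for the queue counts computed below
	 * (hypothetical values, assuming CNIC_PRESENT == 1 and
	 * FCOE_PRESENT == 1 in this configuration): with
	 * max_non_def_sbs = 16 and max_cos_est = 3,
	 *
	 *	rss_count = 16 - 1	= 15
	 *	rx_count  = 15 + 1	= 16
	 *	tx_count  = 15 * 3 + 1	= 46
	 */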
	/* !!! FIXME !!!
	 * Do not allow the maximum SB count to grow above 16
	 * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
	 * We will use the FP_SB_MAX_E1x macro for this matter.
	 */
	max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);

	WARN_ON(!max_non_def_sbs);

	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
	rss_count = max_non_def_sbs - CNIC_PRESENT;

	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
	rx_count = rss_count + FCOE_PRESENT;

	/*
	 * Maximum number of netdev Tx queues:
	 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
	 */
	tx_count = rss_count * max_cos_est + FCOE_PRESENT;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
		       tx_count, rx_count);

	bp->igu_sb_cnt = max_non_def_sbs;
	bp->msg_enable = debug;
	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/*
	 * Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp().
	 */
	doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
	if (doorbell_size > pci_resource_len(pdev, 2)) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbells, bar size too small, aborting\n");
		rc = -ENOMEM;
		goto init_one_exit;
	}
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					doorbell_size);
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto init_one_exit;
	}

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;
#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
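	/*
	 * Note: the Gen2 test in the banner below is chip-dependent -- as the
	 * condition shows, the PCICFG link-speed field reads 2 for 5GT/s on
	 * non-E2 chips but 1 for 5GT/s on E2, hence the CHIP_IS_E2() special
	 * case.
	 */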
	BNX2X_DEV_INFO(
		"%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->sp_rtnl_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_release_firmware(bp);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	/* Stop Tx */
	bnx2x_tx_disable(bp);

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");
}
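/*
 * PCI Error Recovery (EEH/AER) flow implemented by the three callbacks
 * below: ->error_detected() detaches the netdev, unloads the NIC if it was
 * running and asks the core for a slot reset; ->slot_reset() re-enables
 * the device and restores its saved config space; ->resume() re-checks the
 * MCP shmem validity signature, reloads the NIC and re-attaches the
 * netdev.
 */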
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
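/*
 * Note: pci_restore_state() in the slot-reset handler above restores the
 * config space snapshot taken by pci_save_state() in bnx2x_init_dev() at
 * probe time.
 */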
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	struct list_head *pos, *q;

	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);

	/* Free globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
		struct bnx2x_prev_path_list *tmp =
			list_entry(pos, struct bnx2x_prev_path_list, list);
		list_del(pos);
		kfree(tmp);
	}
}

void bnx2x_notify_link_changed(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
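/*
 * Everything from here to the end of the file is the CNIC glue: the hooks
 * through which the cnic module (iSCSI/FCoE offload) shares this device's
 * slow path queue, status blocks and MAC filtering. Compiled only when
 * BCM_CNIC is defined.
 */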
#ifdef BCM_CNIC
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 on success, -ENODEV if the ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;
	int cxt_index, cxt_offset;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
			    & SPE_HDR_CONN_TYPE) >>
			   SPE_HDR_CONN_TYPE_SHIFT;
		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
			  >> SPE_HDR_CMD_ID_SHIFT) & 0xff;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
				cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
					ILT_PAGE_CIDS;
				cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
					(cxt_index * ILT_PAGE_CIDS);
				bnx2x_set_ctx_validation(bp,
					&bp->context[cxt_index].
						vcxt[cxt_offset].eth,
					BNX2X_ISCSI_ETH_CID(bp));
			}
		}

		/*
		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
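/*
 * Flow note: bnx2x_cnic_sp_queue() below only buffers CNIC kwqes into the
 * bp->cnic_kwq ring; bnx2x_cnic_sp_post() above drains that ring into the
 * hardware SPQ as credits allow -- cq_spq_left for ETH ramrods,
 * eq_spq_left for COMMON ramrods, and max_kwqe_pending for iSCSI/FCoE
 * ones.
 */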
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post to SP queue while panic\n");
		return -EIO;
	}
#endif

	if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
	    (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_mutex));
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
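/*
 * The two senders above differ only in locking: bnx2x_cnic_ctl_send()
 * takes cnic_mutex and so may sleep, while bnx2x_cnic_ctl_send_bh() uses
 * an RCU read-side section for contexts that must not sleep (it is called
 * from bnx2x_cnic_cfc_comp() below).
 */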
/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
	struct cnic_ctl_info ctl = {0};

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
	ctl.data.comp.error = err;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
{
	unsigned long accept_flags = 0, ramrod_flags = 0;
	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;

	if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for the UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets, the leading one
		 * in our case).
		 */
		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

		/* Clear STOP_PENDING bit if START is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);

		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
	} else
		/* Clear START_PENDING bit if STOP is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);

	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(sched_state, &bp->sp_state);
	else {
		__set_bit(RAMROD_RX, &ramrod_flags);
		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
				    ramrod_flags);
	}
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
		unsigned long sp_bits = 0;

		/* Configure the iSCSI classification object */
		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
				   cp->iscsi_l2_client_id,
				   cp->iscsi_l2_cid, BP_FUNC(bp),
				   bnx2x_sp(bp, mac_rdata),
				   bnx2x_sp_mapping(bp, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
				   &bp->macs_pool);

		/* Set iSCSI MAC address */
		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
		if (rc)
			break;

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		break;
	}
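	/*
	 * Both the START and STOP cases use the same handshake: set the
	 * relevant bits in a local sp_bits mask and let bnx2x_wait_sp_comp()
	 * poll bp->sp_state until the ramrods issued by
	 * bnx2x_set_iscsi_eth_rx_mode() complete.
	 */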
	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		unsigned long sp_bits = 0;

		/* Stop accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, false);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
					BNX2X_ISCSI_ETH_MAC, true);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}
	case DRV_CTL_ULP_REGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		break;
	}
	case DRV_CTL_ULP_UNREGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}
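/*
 * Registration contract: bnx2x_register_cnic() below publishes the ops
 * pointer with rcu_assign_pointer() only after all shared state is set up;
 * bnx2x_unregister_cnic() clears it under cnic_mutex and then calls
 * synchronize_rcu() so that any reader still inside
 * bnx2x_cnic_ctl_send_bh() finishes before cnic_kwq is freed.
 */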
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL to indicate
	 * to CNIC that it should not try to work with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */