/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>


#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
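
/*
 * Usage note (illustrative sketch, not taken from the driver sources): the
 * allocation macros above expect a 'struct bnx2x *bp' in scope and a local
 * 'alloc_mem_err' label to jump to on failure, with the matching free macros
 * used for unwinding.  All names other than the macros themselves are
 * assumed for the example:
 *
 *	BNX2X_PCI_ALLOC(ring_va, &ring_mapping, ring_size);
 *	BNX2X_ALLOC(sw_ring, sizeof(*sw_ring) * num_entries);
 *	return 0;
 *
 * alloc_mem_err:
 *	BNX2X_FREE(sw_ring);
 *	BNX2X_PCI_FREE(ring_va, ring_mapping, ring_size);
 *	return -ENOMEM;
 */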

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters.
 *
 * @bp:			driver handle
 * @ind_table:		indirection table to configure
 * @config_hash:	re-configure the RSS hash keys
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:		driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @leading:	boolean
 *
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:		driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * block until there is a reply
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:		driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:		driver handle
 * @igu_sb_id:	SB id
 * @segment:	SB segment
 * @index:	SB index
 * @op:		SB operation
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account current line speed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true, disable HW interrupts.
 *
 * This function ensures that there are no
 * ISRs or SP DPCs (sp_task) running after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp:		driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:	netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp:		driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:			driver handle
 * @cl_id:		client id
 * @rx_mode_flags:	rx mode configuration
 * @rx_accept_flags:	rx accept configuration
 * @tx_accept_flags:	tx accept configuration (tx switch)
 * @ramrod_flags:	ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:		driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* validate current fw is loaded */
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 * @budget:	NAPI budget
 *
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside main driver structure
 *
 * @bp:		driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside main driver structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 *
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev:	net_device
 * @wwn:	output buffer
 * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 *
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
			struct bnx2x_fastpath *fp, u16 bd_prod,
			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);


	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW,
		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > bp->tx_ring_size);
	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(bp->tx_ring_size) - used;
}

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
			return true;
	return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
		min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

	/* Clear the last two indices in the page: these are the indices that
	 * correspond to the "next" element, hence will never be indicated
	 * and should be removed from the calculations.
	 */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
				      struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				       u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->data = cons_rx_buf->data;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
		start_params->network_cos_mode = STATIC_COS;
	else /* CHIP_IS_E1X */
		start_params->network_cos_mode = FW_WRR;

	return bnx2x_func_state_change(bp, &func_params);
}


/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}
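
/*
 * Worked example (derived directly from the assignments above): for the MAC
 * address 00:11:22:33:44:55 the FW words end up holding the bytes
 * fw_hi = {0x11, 0x00}, fw_mid = {0x33, 0x22}, fw_lo = {0x55, 0x44},
 * i.e. each 16-bit word carries two MAC bytes with their order swapped.
 */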

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;
	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	if (!CHIP_IS_E1x(bp)) {
#ifdef BCM_CNIC
		/* there are special statistics counters for FCoE 136..140 */
		if (IS_FCOE_FP(fp))
			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
#endif
		return fp->cl_id;
	}
	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:		driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate a number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
	__le16 *tx_cons_sb)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;

	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{

	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}


static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	/* The current BNX2X_FCOE_ETH_CID definition implies no more than
	 * 16 ETH clients per function when CNIC is enabled!
	 *
	 * Fix it ASAP!!!
	 */
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;

	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}
#endif

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 1000);
	}

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* Wait for 5 secs tops */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 1000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
			  bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:		driver handle
 * @cxt:	context of the connection on the host memory
 * @cid:	SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 *
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
	/* gro frags per page */
	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

	/*
	 * 1. number of frags should not grow above MAX_SKB_FRAGS
	 * 2. frag must fit the page
	 */
	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}

static inline bool bnx2x_need_gro_check(int mtu)
{
	return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
		(SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
}

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of bp->fp[index].napi are kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	if (bp->stats_init)
		memset(fp, 0, sizeof(*fp));
	else {
		/* Keep Queue statistics */
		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;

		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
					  GFP_KERNEL);
		if (tmp_eth_q_stats)
			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));

		tmp_eth_q_stats_old =
			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
				GFP_KERNEL);
		if (tmp_eth_q_stats_old)
			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));

		memset(fp, 0, sizeof(*fp));

		if (tmp_eth_q_stats) {
			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));
			kfree(tmp_eth_q_stats);
		}

		if (tmp_eth_q_stats_old) {
			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));
			kfree(tmp_eth_q_stats_old);
		}

	}

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
				  (bp->flags & GRO_ENABLE_FLAG &&
				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}

#ifdef BCM_CNIC
/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp:		driver handle
 *
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);
#endif
/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
	return 2 * vn + BP_PORT(bp);
}
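
/*
 * For example (following directly from the formula above): on port 1,
 * VN 2 maps to function 2 * 2 + 1 = 5, while on port 0 the even-numbered
 * functions 0, 2, 4 and 6 belong to VNs 0..3.
 */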

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp:		driver handle
 *
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		if (vn == BP_VN(bp))
			continue;

		func = func_by_vn(bp, vn);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp:		driver handle
 * @flags:	flags to update
 * @set:	set or clear
 *
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}

static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
	if (is_valid_ether_addr(addr))
		return true;
#ifdef BCM_CNIC
	if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp))
		return true;
#endif
	return false;
}

#endif /* BNX2X_CMN_H */