/* bnx2x_cmn.h: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/irq.h>

#include "bnx2x.h"
#include "bnx2x_sriov.h"

/* This is used as a replacement for an MCP if it's not present */
extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int bnx2x_num_queues;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)

#define BNX2X_PCI_ALLOC(y, size) \
({ \
        void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
        if (x) \
                DP(NETIF_MSG_HW, \
                   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
                   (unsigned long long)(*y), x); \
        x; \
})
#define BNX2X_PCI_FALLOC(y, size) \
({ \
        void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
        if (x) { \
                memset(x, 0xff, size); \
                DP(NETIF_MSG_HW, \
                   "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
                   (unsigned long long)(*y), x); \
        } \
        x; \
})

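/* Illustrative sketch only (not part of the driver API): the allocation
 * macros above expect a local 'bp' pointer in scope and are used in matched
 * pairs; 'ring', its fields and RING_SIZE below are hypothetical:
 *
 *      ring->desc = BNX2X_PCI_ALLOC(&ring->mapping, RING_SIZE);
 *      if (!ring->desc)
 *              return -ENOMEM;
 *      ...
 *      BNX2X_PCI_FREE(ring->desc, ring->mapping, RING_SIZE);
 */
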
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp: driver handle
 * @unload_mode: requested function's unload mode
 *
 * Returns the unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp: driver handle
 * @keep_link: true iff link should be kept up
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);

/**
 * bnx2x_rss - configure RSS parameters in a PF.
 *
 * @bp: driver handle
 * @rss_obj: RSS object to use
 * @config_hash: re-configure the RSS hash keys configuration
 * @enable: enabled or disabled configuration
 */
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
              bool config_hash, bool enable);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp: driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp: driver handle
 * @fp: pointer to the fastpath structure
 * @leading: set for the leading queue
 *
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp: driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp: driver handle
 * @command: request
 * @param: request's parameter
 *
 * Blocks until there is a reply.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp: driver handle
 * @load_mode: current mode
 */
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp: driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_force_link_reset - Forces link reset, and puts the PHY
 * in reset as well.
 *
 * @bp: driver handle
 */
void bnx2x_force_link_reset(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp: driver handle
 * @is_serdes: true to test the SerDes link
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp: driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp: driver handle
 * @igu_sb_id: SB id
 * @segment: SB segment
 * @index: SB index
 * @op: SB operation
 * @update: is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp: driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp: driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp: driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non-MSI-X interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp: driver handle
 * @cmd: command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_setup_cnic_info - provides cnic with updated info
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_info(struct bnx2x *bp);

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp: driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp: driver handle
 * @disable_hw: true, disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are running
 * after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init_cnic - init driver internals for cnic.
 *
 * @bp: driver handle
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init_cnic(struct bnx2x *bp);

/**
 * bnx2x_pre_irq_nic_init - init driver internals.
 *
 * @bp: driver handle
 *
 * Initializes:
 *  - fastpath object
 *  - fastpath rings
 *  - etc.
 */
void bnx2x_pre_irq_nic_init(struct bnx2x *bp);

/**
 * bnx2x_post_irq_nic_init - init driver internals.
 *
 * @bp: driver handle
 * @load_code: COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - status blocks
 *  - slowpath rings
 *  - etc.
 */
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem_cnic - release driver's memory for cnic.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

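/* Illustrative only: the kernel-doc above implies the rough ordering used on
 * the load path (the authoritative sequence lives in bnx2x_nic_load();
 * error handling omitted, details simplified):
 *
 *      if (bnx2x_alloc_mem(bp))
 *              return -ENOMEM;
 *      bnx2x_pre_irq_nic_init(bp);
 *      ... request IRQ / MSI-X vectors ...
 *      bnx2x_post_irq_nic_init(bp, load_code);
 *
 * The unload path releases the resources again via bnx2x_free_mem().
 */
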
/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp: driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp: driver handle
 * @unload_mode: COMMON, PORT, FUNCTION
 * @keep_link: true iff link should be kept up.
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp: driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp: driver handle
 * @set: set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode_inner - set MAC filtering configurations.
 *
 * @bp: driver handle
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode_inner(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);

void bnx2x_clear_vlan_info(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp: fastpath handle for the event
 * @rr_cqe: eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC
 * and TM.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp: driver handle
 * @update_shmem: whether to update the DCBX configuration in shmem
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp: driver handle
 * @state: required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

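/* Illustrative only: bnx2x_acquire_hw_lock()/bnx2x_release_hw_lock() above
 * are used as a matched pair around resources shared with other functions,
 * mirroring the pattern of bnx2x_update_drv_flags() further below:
 *
 *      if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS))
 *              return;
 *      ... read-modify-write the shared resource ...
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
 */
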
/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp: driver handle
 * @value: new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* Error handling */
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
                     void *type_data);

int bnx2x_get_vf_config(struct net_device *dev, int vf,
                        struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
                      __be16 vlan_proto);
int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev);

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
                               ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

extern const struct dev_pm_ops bnx2x_pm_ops;

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
int bnx2x_load_cnic(struct bnx2x *bp);

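/* Illustrative only: after the RX rings have been refilled, the new producer
 * values are pushed to the chip in one shot with bnx2x_update_rx_prod()
 * above; the local names here are hypothetical, the fp fields are assumed to
 * come from struct bnx2x_fastpath:
 *
 *      fp->rx_bd_prod = bd_prod;
 *      fp->rx_comp_prod = comp_prod;
 *      bnx2x_update_rx_prod(bp, fp, bd_prod, comp_prod, fp->rx_sge_prod);
 */
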
/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp: driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 *
 * @bp: driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memory outside the main driver structure
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev: net device
 * @new_mtu: requested mtu
 *
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#ifdef NETDEV_FCOE_WWNN
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev: net_device
 * @wwn: output buffer
 * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 *
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
                                     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev: net device
 * @txqueue: index of the TX queue that timed out
 */
void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue);

/**
 * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
 *
 * @bp: driver handle
 * @c2s_map: should have BNX2X_MAX_PRIORITY entries for mapping
 * @c2s_default: entry for non-tagged configuration
 */
void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;

        used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > txdata->tx_ring_size);
        WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(txdata->tx_ring_size) - used;
}

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;

        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
                        return true;
        return false;
}

#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 cons;
        union eth_rx_cqe *cqe;
        struct eth_fast_path_rx_cqe *cqe_fp;

        cons = RCQ_BD(fp->rx_comp_cons);
        cqe = &fp->rx_comp_ring[cons];
        cqe_fp = &cqe->fast_path_cqe;
        return BNX2X_IS_CQE_COMPLETED(cqe_fp);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp: driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}

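/* Illustrative only: the *_has_*_work() helpers above are the cheap checks a
 * NAPI poll routine performs before touching the rings, roughly:
 *
 *      if (bnx2x_has_tx_work(fp))
 *              for_each_cos_in_tx_queue(fp, cos)
 *                      bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
 *      if (bnx2x_has_rx_work(fp))
 *              work_done = bnx2x_rx_int(fp, budget);
 *
 * where bnx2x_rx_int() stands for the driver's RX completion handler (not
 * declared in this header).
 */
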
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        /* Since many fragments can share the same page, make sure to
         * only unmap and free the page once.
         */
        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE, DMA_FROM_DEVICE);

        put_page(page);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        for_each_rx_queue_cnic(bp, i) {
                __netif_napi_del(&bnx2x_fp(bp, i, napi));
        }
        synchronize_net();
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
        int i;

        for_each_eth_queue(bp, i) {
                __netif_napi_del(&bnx2x_fp(bp, i, napi));
        }
        synchronize_net();
}

int bnx2x_set_int_mode(struct bnx2x *bp);

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
                bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

        /* Clear the two last indices in the page to 1:
         * these are the indices that correspond to the "next" element,
         * hence will never be indicated and should be removed from
         * the calculations.
         */
        bnx2x_clear_sge_mask_next_elems(fp);
}

/* Note that we are not allocating a new buffer,
 * we are just moving one from cons to prod.
 * We are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                       u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
        return 2 * vn + BP_PORT(bp);
}

static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
        return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}

/**
 * bnx2x_func_start - init function
 *
 * @bp: driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;
        u16 port;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;

        /* Configure Ethertype for BD mode */
        if (IS_MF_BD(bp)) {
                DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
                start_params->sd_vlan_eth_type = ETH_P_8021AD;
                REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
                REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
                REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);

                bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
                                      &start_params->c2s_pri_default);
                start_params->c2s_pri_valid = 1;

                DP(NETIF_MSG_IFUP,
                   "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
                   start_params->c2s_pri[0], start_params->c2s_pri[1],
                   start_params->c2s_pri[2], start_params->c2s_pri[3],
                   start_params->c2s_pri[4], start_params->c2s_pri[5],
                   start_params->c2s_pri[6], start_params->c2s_pri[7],
                   start_params->c2s_pri_default);
        }

        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;

        if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
                port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
                start_params->vxlan_dst_port = port;
        }
        if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
                port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
                start_params->geneve_dst_port = port;
        }

        start_params->inner_rss = 1;

        if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
                start_params->class_fail_ethtype = ETH_P_FIP;
                start_params->class_fail = 1;
                start_params->no_added_tags = 1;
        }

        return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi: pointer to upper part
 * @fw_mid: pointer to middle part
 * @fw_lo: pointer to lower part
 * @mac: pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
                                         __le16 *fw_lo, u8 *mac)
{
        ((u8 *)fw_hi)[0] = mac[1];
        ((u8 *)fw_hi)[1] = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0] = mac[5];
        ((u8 *)fw_lo)[1] = mac[4];
}

static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
                                          struct bnx2x_alloc_pool *pool)
{
        if (!pool->page)
                return;

        put_page(pool->page);

        pool->page = NULL;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        if (fp->mode == TPA_MODE_DISABLED)
                return;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);

        bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}

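/* Illustrative only: bnx2x_set_fw_mac_addr() above converts a netdev MAC into
 * the byte-swapped 16-bit triplet the FW expects; 'cmd' and its field names
 * below are hypothetical:
 *
 *      bnx2x_set_fw_mac_addr(&cmd->fw_hi, &cmd->fw_mid, &cmd->fw_lo,
 *                            bp->dev->dev_addr);
 */
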
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x
 * are per port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp)) {
                /* there are special statistics counters for FCoE 136..140 */
                if (IS_FCOE_FP(fp))
                        return bp->cnic_base_cl_id + (bp->pf_num >> 1);
                return fp->cl_id;
        }
        return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
                           fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);

        if (!CHIP_IS_E1x(bp))
                bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
                                    fp->cl_id, fp->cid, BP_FUNC(bp),
                                    bnx2x_sp(bp, vlan_rdata),
                                    bnx2x_sp_mapping(bp, vlan_rdata),
                                    BNX2X_FILTER_VLAN_PENDING,
                                    &bp->sp_state, obj_type,
                                    &bp->vlans_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp: driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        /* Calculate the number of functions enabled on the current
         * PATH/PORT.
         */
        if (CHIP_REV_IS_SLOW(bp)) {
                if (IS_MF(bp))
                        func_num = 4;
                else
                        func_num = 2;
        } else {
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
                                          func_mf_config[BP_PATH(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
                }
        }

        WARN_ON(!func_num);

        return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
                             BP_FUNC(bp), BP_FUNC(bp),
                             bnx2x_sp(bp, mcast_rdata),
                             bnx2x_sp_mapping(bp, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
                             BNX2X_OBJ_TYPE_RX);

        /* Setup CAM credit pools */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));

        bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
                                    bnx2x_get_path_func_num(bp));

        /* RSS configuration object */
        bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
                                  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
                                  bnx2x_sp(bp, rss_rdata),
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);

        bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
        else
                return fp->cl_id;
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
                                     struct bnx2x_fastpath *fp)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
        txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}

static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 2000);
        }

        return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}

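/* Illustrative only: bnx2x_clean_tx_queue() above is typically run for every
 * CoS of every TX queue when draining traffic before unload (see
 * bnx2x_drain_tx_queues() declared later in this file); for_each_tx_queue()
 * is assumed from bnx2x.h:
 *
 *      for_each_tx_queue(bp, i)
 *              for_each_cos_in_tx_queue(&bp->fp[i], cos)
 *                      rc |= bnx2x_clean_tx_queue(bp, bp->fp[i].txdata_ptr[cos]);
 */
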
/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp: driver handle
 * @mask: bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* Wait for 5 secs tops */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 2000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp: driver handle
 * @cxt: context of the connection on the host memory
 * @cid: SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
                                    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp: driver handle
 * @mf_cfg: MF configuration
 *
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
        u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                      FUNC_MF_CFG_MAX_BW_SHIFT;

        if (!max_cfg) {
                DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
                   "Max BW configured to 0 - using 100 instead\n");
                max_cfg = 100;
        }
        return max_cfg;
}

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
        /* gro frags per page */
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        /*
         * 1. Number of frags should not grow above MAX_SKB_FRAGS
         * 2. Frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}

/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp: driver handle
 *
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);

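/* Illustrative only: callers normally feed bnx2x_extract_max_cfg() above the
 * function's MF configuration word; how the result is scaled (percentage of
 * line speed vs. units of 100 Mbps) depends on the MF mode:
 *
 *      u16 max_cfg = bnx2x_extract_max_cfg(bp, bp->mf_config[BP_VN(bp)]);
 */
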
/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp: driver handle
 *
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
        int func;
        int vn;

        /* Set the attention towards other drivers on the same port */
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                if (vn == BP_VN(bp))
                        continue;

                func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp: driver handle
 * @flags: flags to update
 * @set: set or clear
 *
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
        if (SHMEM2_HAS(bp, drv_flags)) {
                u32 drv_flags;

                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
                drv_flags = SHMEM2_RD(bp, drv_flags);

                if (set)
                        SET_FLAGS(drv_flags, flags);
                else
                        RESET_FLAGS(drv_flags, flags);

                SHMEM2_WR(bp, drv_flags, drv_flags);
                DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
        }
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string
 *
 * @bp: driver handle
 * @buf: character buffer to fill with the fw name
 * @buf_len: length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);

int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);

void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
                            u32 verbose);

/**
 * bnx2x_set_os_driver_state - write driver state for management FW usage
 *
 * @bp: driver handle
 * @state: OS_DRIVER_STATE_* value reflecting current driver state
 */
void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);

/**
 * bnx2x_nvram_read - reads data from nvram [might sleep]
 *
 * @bp: driver handle
 * @offset: byte offset in nvram
 * @ret_buf: pointer to buffer where data is to be stored
 * @buf_size: Length of 'ret_buf' in bytes
 */
int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
                     int buf_size);

#endif /* BNX2X_CMN_H */