/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
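/*
 * Illustrative usage of the allocation macros above (a sketch, not code
 * taken from this driver): the macros assume a 'bp' pointer in scope and
 * a local 'alloc_mem_err' label to jump to on failure. The 'desc',
 * 'mapping' and 'sw_ring' fields below are hypothetical.
 *
 *	BNX2X_PCI_ALLOC(fp->desc, &fp->mapping, BCM_PAGE_SIZE);
 *	BNX2X_ALLOC(fp->sw_ring, sizeof(*fp->sw_ring) * ring_size);
 *	return 0;
 *
 * alloc_mem_err:
 *	BNX2X_FREE(fp->sw_ring);
 *	BNX2X_PCI_FREE(fp->desc, fp->mapping, BCM_PAGE_SIZE);
 *	return -ENOMEM;
 */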
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Returns the unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters.
 *
 * @bp:			driver handle
 * @ind_table:		indirection table to configure
 * @config_hash:	re-configure the RSS hash keys
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:		driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @leading:	set if this is the leading queue
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:		driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * Blocks until there is a reply.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
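/*
 * Illustrative call (a sketch only; the exact command codes the driver
 * uses live in the shmem/MCP definitions): a load request handshake
 * might look roughly like
 *
 *	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 *	if (!load_code)
 *		return -EBUSY;	(no MCP response)
 */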
/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:		driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to the drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:		driver handle
 * @igu_sb_id:	SB id
 * @segment:	SB segment
 * @index:	SB index
 * @op:		SB operation
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account the current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true - disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are running
 * when it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp:		driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:	netdevice
 *
 * Called with netif_tx_lock from dev_mcast.c.
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in the FW.
 *
 * @bp:		driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:			driver handle
 * @cl_id:		client id
 * @rx_mode_flags:	rx mode configuration
 * @rx_accept_flags:	rx accept configuration
 * @tx_accept_flags:	tx accept configuration (tx switch)
 * @ramrod_flags:	ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:		driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 * @budget:	NAPI budget
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure
 *
 * @bp:		driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside the main driver structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev:	net_device
 * @wwn:	output buffer
 * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
u32 bnx2x_fix_features(struct net_device *dev, u32 features);
int bnx2x_set_features(struct net_device *dev, u32 features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
			struct bnx2x_fastpath *fp, u16 bd_prod,
			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func |
			((is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
		   "idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}
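/*
 * Illustrative use of bnx2x_ack_sb() (a sketch of how fastpath ISR/NAPI
 * code typically re-enables a status-block interrupt line, not a
 * definitive reference): ack the latest SB index and unmask the line
 * once polling is done:
 *
 *	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 */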
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries.
	 * It will be used as a threshold.
	 */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > bp->tx_ring_size);
	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(bp->tx_ring_size) - used;
}
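/*
 * Worked example for bnx2x_tx_avail() above: on an empty ring
 * (prod == cons) it returns tx_ring_size - NUM_TX_RINGS, i.e. one BD
 * per ring page is permanently reserved for the "next-page" pointer
 * and is never reported as usable.
 */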
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;

	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
			return true;
	return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
		min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}
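/*
 * For example (assuming "num_queues" is exposed as a module parameter,
 * which is how the extern above is normally set): leaving it at 0 on a
 * 16-CPU machine with BNX2X_MAX_QUEUES(bp) == 8 yields 8 queues, while
 * num_queues=4 caps it at 4. Both branches clamp to the chip limit.
 */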
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	 * these are the indices that correspond to the "next" element,
	 * hence will never be indicated and should be removed from
	 * the calculations.
	 */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* Note that we are not allocating a new skb,
 * we are just moving one from cons to prod.
 * We are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->skb = cons_rx_buf->skb;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;
	if (CHIP_IS_E1x(bp))
		start_params->network_cos_mode = OVERRIDE_COS;
	else
		start_params->network_cos_mode = STATIC_COS;

	return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}
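/*
 * Worked example for bnx2x_set_fw_mac_addr() above: each 16-bit word
 * holds two MAC bytes swapped, so for mac = 00:11:22:33:44:55 the bytes
 * stored at fw_hi are {0x11, 0x00}, at fw_mid {0x33, 0x22} and at fw_lo
 * {0x55, 0x44}.
 */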
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		struct sk_buff *skb = first_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		first_buf->skb = NULL;
	}
}

static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}
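/*
 * The three helpers below chain the rx descriptor, SGE and completion
 * rings the same way bnx2x_init_tx_ring_one() chains the tx ring above:
 * the reserved slot(s) at the end of page i hold the DMA address of
 * page (i % NUM_*_RINGS), so pages link forward and the final page
 * points back to page 0, making the ring circular.
 */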
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
	}
	if (fp->eth_q_stats.rx_skb_alloc_failed)
		BNX2X_ERR("was only able to allocate "
			  "%d rx skbs on queue[%d]\n",
			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i - fp->eth_q_stats.rx_skb_alloc_failed;
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	if (!CHIP_IS_E1x(fp->bp))
		return fp->cl_id;
	else
		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get the number of active functions
 *
 * @bp: driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate the number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
				     struct bnx2x_fp_txdata *txdata, u32 cid,
				     int txq_index, __le16 *tx_cons_sb)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;

	DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}
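
/*
 * Usage sketch (hypothetical, illustration only): a single-CoS L2
 * queue would wire its txdata to its own CID and to one consumer
 * index in the status block via bnx2x_init_txdata() above; the real
 * call sites pass chip- and queue-specific indices.
 */
static inline void bnx2x_demo_setup_one_txq(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    struct bnx2x_fp_txdata *txdata,
					    int txq_index, __le16 *cons_sb)
{
	/* one CID, one netdev tx queue, one SB consumer pointer */
	bnx2x_init_txdata(bp, txdata, fp->cid, txq_index, cons_sb);
}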

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	/* The current BNX2X_FCOE_ETH_CID definition implies no more than
	 * 16 ETH clients per function when CNIC is enabled!
	 *
	 * Fix it ASAP!!!
	 */
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;

	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);

	DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals the FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for the FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}
#endif
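
#ifdef BCM_CNIC
/*
 * Illustrative sketch (hypothetical, not driver state): the CNIC
 * accessors above are pure lookups, so a caller may snapshot all of
 * the FCoE L2 client identifiers in one go, e.g. for debug printing.
 */
struct bnx2x_demo_cnic_ids {
	u8 cl_id;	/* L2 client id of the FCoE ring */
	u8 fw_sb_id;	/* status block id used by the FW */
	u8 igu_sb_id;	/* status block id used by the IGU */
};

static inline void bnx2x_demo_get_cnic_ids(struct bnx2x *bp,
					   struct bnx2x_demo_cnic_ids *ids)
{
	ids->cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_FCOE_ETH_CL_ID_IDX);
	ids->fw_sb_id = bnx2x_cnic_fw_sb_id(bp);
	ids->igu_sb_id = bnx2x_cnic_igu_sb_id(bp);
}
#endif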

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 1000);
	}

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}
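
/*
 * Pattern sketch (hypothetical helper, illustration only): the two
 * wrappers above show the idiom for mirroring any host structure into
 * storm internal memory -- compute the BAR-relative offset of the
 * firmware-defined location, then let __storm_memset_struct() copy
 * the structure one dword at a time.  The copy is dword-granular, so
 * the structure size must be a multiple of 4.
 */
static inline void bnx2x_demo_storm_memset(struct bnx2x *bp, u32 bar_offset,
					   void *data, size_t size)
{
	WARN_ON(size & 0x3);	/* dword-granular copy */
	__storm_memset_struct(bp, bar_offset, size, (u32 *)data);
}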

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp: driver handle
 * @mask: bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* Wait for 5 secs tops */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 1000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
			  bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp: driver handle
 * @cxt: context of the connection on the host memory
 * @cid: SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract the MAX BW part from the MF configuration.
 *
 * @bp: driver handle
 * @mf_cfg: MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
		      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		DP(NETIF_MSG_LINK,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}
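
/*
 * Usage sketch (hypothetical wrapper): passing ~0UL as the mask makes
 * bnx2x_wait_sp_comp() above block until every outstanding slowpath
 * bit in bp->sp_state is cleared, which is a natural barrier before
 * tearing the function down.
 */
static inline bool bnx2x_demo_quiesce_sp(struct bnx2x *bp)
{
	/* wait (up to 5 secs) for all pending slowpath completions */
	return bnx2x_wait_sp_comp(bp, ~0x0UL);
}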

#ifdef BCM_CNIC
/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp: driver handle
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);
#endif

#endif /* BNX2X_CMN_H */