1adfc5217SJeff Kirsher /* bnx2x_cmn.h: Broadcom Everest network driver. 2adfc5217SJeff Kirsher * 385b26ea1SAriel Elior * Copyright (c) 2007-2012 Broadcom Corporation 4adfc5217SJeff Kirsher * 5adfc5217SJeff Kirsher * This program is free software; you can redistribute it and/or modify 6adfc5217SJeff Kirsher * it under the terms of the GNU General Public License as published by 7adfc5217SJeff Kirsher * the Free Software Foundation. 8adfc5217SJeff Kirsher * 9adfc5217SJeff Kirsher * Maintained by: Eilon Greenstein <eilong@broadcom.com> 10adfc5217SJeff Kirsher * Written by: Eliezer Tamir 11adfc5217SJeff Kirsher * Based on code from Michael Chan's bnx2 driver 12adfc5217SJeff Kirsher * UDP CSUM errata workaround by Arik Gendelman 13adfc5217SJeff Kirsher * Slowpath and fastpath rework by Vladislav Zolotarov 14adfc5217SJeff Kirsher * Statistics and Link management by Yitchak Gertner 15adfc5217SJeff Kirsher * 16adfc5217SJeff Kirsher */ 17adfc5217SJeff Kirsher #ifndef BNX2X_CMN_H 18adfc5217SJeff Kirsher #define BNX2X_CMN_H 19adfc5217SJeff Kirsher 20adfc5217SJeff Kirsher #include <linux/types.h> 21adfc5217SJeff Kirsher #include <linux/pci.h> 22adfc5217SJeff Kirsher #include <linux/netdevice.h> 23614c76dfSDmitry Kravkov #include <linux/etherdevice.h> 24adfc5217SJeff Kirsher 25adfc5217SJeff Kirsher 26adfc5217SJeff Kirsher #include "bnx2x.h" 27adfc5217SJeff Kirsher 28adfc5217SJeff Kirsher /* This is used as a replacement for an MCP if it's not present */ 29adfc5217SJeff Kirsher extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ 30adfc5217SJeff Kirsher 31adfc5217SJeff Kirsher extern int num_queues; 32adfc5217SJeff Kirsher 33adfc5217SJeff Kirsher /************************ Macros ********************************/ 34adfc5217SJeff Kirsher #define BNX2X_PCI_FREE(x, y, size) \ 35adfc5217SJeff Kirsher do { \ 36adfc5217SJeff Kirsher if (x) { \ 37adfc5217SJeff Kirsher dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ 38adfc5217SJeff Kirsher x = NULL; \ 
39adfc5217SJeff Kirsher y = 0; \ 40adfc5217SJeff Kirsher } \ 41adfc5217SJeff Kirsher } while (0) 42adfc5217SJeff Kirsher 43adfc5217SJeff Kirsher #define BNX2X_FREE(x) \ 44adfc5217SJeff Kirsher do { \ 45adfc5217SJeff Kirsher if (x) { \ 46adfc5217SJeff Kirsher kfree((void *)x); \ 47adfc5217SJeff Kirsher x = NULL; \ 48adfc5217SJeff Kirsher } \ 49adfc5217SJeff Kirsher } while (0) 50adfc5217SJeff Kirsher 51adfc5217SJeff Kirsher #define BNX2X_PCI_ALLOC(x, y, size) \ 52adfc5217SJeff Kirsher do { \ 53adfc5217SJeff Kirsher x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 54adfc5217SJeff Kirsher if (x == NULL) \ 55adfc5217SJeff Kirsher goto alloc_mem_err; \ 56adfc5217SJeff Kirsher memset((void *)x, 0, size); \ 57adfc5217SJeff Kirsher } while (0) 58adfc5217SJeff Kirsher 59adfc5217SJeff Kirsher #define BNX2X_ALLOC(x, size) \ 60adfc5217SJeff Kirsher do { \ 61adfc5217SJeff Kirsher x = kzalloc(size, GFP_KERNEL); \ 62adfc5217SJeff Kirsher if (x == NULL) \ 63adfc5217SJeff Kirsher goto alloc_mem_err; \ 64adfc5217SJeff Kirsher } while (0) 65adfc5217SJeff Kirsher 66adfc5217SJeff Kirsher /*********************** Interfaces **************************** 67adfc5217SJeff Kirsher * Functions that need to be implemented by each driver version 68adfc5217SJeff Kirsher */ 69adfc5217SJeff Kirsher /* Init */ 70adfc5217SJeff Kirsher 71adfc5217SJeff Kirsher /** 72adfc5217SJeff Kirsher * bnx2x_send_unload_req - request unload mode from the MCP. 73adfc5217SJeff Kirsher * 74adfc5217SJeff Kirsher * @bp: driver handle 75adfc5217SJeff Kirsher * @unload_mode: requested function's unload mode 76adfc5217SJeff Kirsher * 77adfc5217SJeff Kirsher * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 78adfc5217SJeff Kirsher */ 79adfc5217SJeff Kirsher u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); 80adfc5217SJeff Kirsher 81adfc5217SJeff Kirsher /** 82adfc5217SJeff Kirsher * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 
83adfc5217SJeff Kirsher * 84adfc5217SJeff Kirsher * @bp: driver handle 85adfc5217SJeff Kirsher */ 86adfc5217SJeff Kirsher void bnx2x_send_unload_done(struct bnx2x *bp); 87adfc5217SJeff Kirsher 88adfc5217SJeff Kirsher /** 89adfc5217SJeff Kirsher * bnx2x_config_rss_pf - configure RSS parameters. 90adfc5217SJeff Kirsher * 91adfc5217SJeff Kirsher * @bp: driver handle 92adfc5217SJeff Kirsher * @ind_table: indirection table to configure 93adfc5217SJeff Kirsher * @config_hash: re-configure RSS hash keys configuration 94adfc5217SJeff Kirsher */ 95adfc5217SJeff Kirsher int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); 96adfc5217SJeff Kirsher 97adfc5217SJeff Kirsher /** 98adfc5217SJeff Kirsher * bnx2x__init_func_obj - init function object 99adfc5217SJeff Kirsher * 100adfc5217SJeff Kirsher * @bp: driver handle 101adfc5217SJeff Kirsher * 102adfc5217SJeff Kirsher * Initializes the Function Object with the appropriate 103adfc5217SJeff Kirsher * parameters which include a function slow path driver 104adfc5217SJeff Kirsher * interface. 105adfc5217SJeff Kirsher */ 106adfc5217SJeff Kirsher void bnx2x__init_func_obj(struct bnx2x *bp); 107adfc5217SJeff Kirsher 108adfc5217SJeff Kirsher /** 109adfc5217SJeff Kirsher * bnx2x_setup_queue - setup eth queue. 110adfc5217SJeff Kirsher * 111adfc5217SJeff Kirsher * @bp: driver handle 112adfc5217SJeff Kirsher * @fp: pointer to the fastpath structure 113adfc5217SJeff Kirsher * @leading: boolean 114adfc5217SJeff Kirsher * 115adfc5217SJeff Kirsher */ 116adfc5217SJeff Kirsher int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, 117adfc5217SJeff Kirsher bool leading); 118adfc5217SJeff Kirsher 119adfc5217SJeff Kirsher /** 120adfc5217SJeff Kirsher * bnx2x_setup_leading - bring up a leading eth queue. 
121adfc5217SJeff Kirsher * 122adfc5217SJeff Kirsher * @bp: driver handle 123adfc5217SJeff Kirsher */ 124adfc5217SJeff Kirsher int bnx2x_setup_leading(struct bnx2x *bp); 125adfc5217SJeff Kirsher 126adfc5217SJeff Kirsher /** 127adfc5217SJeff Kirsher * bnx2x_fw_command - send the MCP a request 128adfc5217SJeff Kirsher * 129adfc5217SJeff Kirsher * @bp: driver handle 130adfc5217SJeff Kirsher * @command: request 131adfc5217SJeff Kirsher * @param: request's parameter 132adfc5217SJeff Kirsher * 133adfc5217SJeff Kirsher * block until there is a reply 134adfc5217SJeff Kirsher */ 135adfc5217SJeff Kirsher u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); 136adfc5217SJeff Kirsher 137adfc5217SJeff Kirsher /** 138adfc5217SJeff Kirsher * bnx2x_initial_phy_init - initialize link parameters structure variables. 139adfc5217SJeff Kirsher * 140adfc5217SJeff Kirsher * @bp: driver handle 141adfc5217SJeff Kirsher * @load_mode: current mode 142adfc5217SJeff Kirsher */ 143adfc5217SJeff Kirsher u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); 144adfc5217SJeff Kirsher 145adfc5217SJeff Kirsher /** 146adfc5217SJeff Kirsher * bnx2x_link_set - configure hw according to link parameters structure. 147adfc5217SJeff Kirsher * 148adfc5217SJeff Kirsher * @bp: driver handle 149adfc5217SJeff Kirsher */ 150adfc5217SJeff Kirsher void bnx2x_link_set(struct bnx2x *bp); 151adfc5217SJeff Kirsher 152adfc5217SJeff Kirsher /** 153adfc5217SJeff Kirsher * bnx2x_link_test - query link status. 154adfc5217SJeff Kirsher * 155adfc5217SJeff Kirsher * @bp: driver handle 156adfc5217SJeff Kirsher * @is_serdes: bool 157adfc5217SJeff Kirsher * 158adfc5217SJeff Kirsher * Returns 0 if link is UP. 
159adfc5217SJeff Kirsher */ 160adfc5217SJeff Kirsher u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); 161adfc5217SJeff Kirsher 162adfc5217SJeff Kirsher /** 163adfc5217SJeff Kirsher * bnx2x_drv_pulse - write driver pulse to shmem 164adfc5217SJeff Kirsher * 165adfc5217SJeff Kirsher * @bp: driver handle 166adfc5217SJeff Kirsher * 167adfc5217SJeff Kirsher * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox 168adfc5217SJeff Kirsher * in the shmem. 169adfc5217SJeff Kirsher */ 170adfc5217SJeff Kirsher void bnx2x_drv_pulse(struct bnx2x *bp); 171adfc5217SJeff Kirsher 172adfc5217SJeff Kirsher /** 173adfc5217SJeff Kirsher * bnx2x_igu_ack_sb - update IGU with current SB value 174adfc5217SJeff Kirsher * 175adfc5217SJeff Kirsher * @bp: driver handle 176adfc5217SJeff Kirsher * @igu_sb_id: SB id 177adfc5217SJeff Kirsher * @segment: SB segment 178adfc5217SJeff Kirsher * @index: SB index 179adfc5217SJeff Kirsher * @op: SB operation 180adfc5217SJeff Kirsher * @update: is HW update required 181adfc5217SJeff Kirsher */ 182adfc5217SJeff Kirsher void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 183adfc5217SJeff Kirsher u16 index, u8 op, u8 update); 184adfc5217SJeff Kirsher 185adfc5217SJeff Kirsher /* Disable transactions from chip to host */ 186adfc5217SJeff Kirsher void bnx2x_pf_disable(struct bnx2x *bp); 187adfc5217SJeff Kirsher 188adfc5217SJeff Kirsher /** 189adfc5217SJeff Kirsher * bnx2x__link_status_update - handles link status change. 190adfc5217SJeff Kirsher * 191adfc5217SJeff Kirsher * @bp: driver handle 192adfc5217SJeff Kirsher */ 193adfc5217SJeff Kirsher void bnx2x__link_status_update(struct bnx2x *bp); 194adfc5217SJeff Kirsher 195adfc5217SJeff Kirsher /** 196adfc5217SJeff Kirsher * bnx2x_link_report - report link status to upper layer. 
197adfc5217SJeff Kirsher * 198adfc5217SJeff Kirsher * @bp: driver handle 199adfc5217SJeff Kirsher */ 200adfc5217SJeff Kirsher void bnx2x_link_report(struct bnx2x *bp); 201adfc5217SJeff Kirsher 202adfc5217SJeff Kirsher /* None-atomic version of bnx2x_link_report() */ 203adfc5217SJeff Kirsher void __bnx2x_link_report(struct bnx2x *bp); 204adfc5217SJeff Kirsher 205adfc5217SJeff Kirsher /** 206adfc5217SJeff Kirsher * bnx2x_get_mf_speed - calculate MF speed. 207adfc5217SJeff Kirsher * 208adfc5217SJeff Kirsher * @bp: driver handle 209adfc5217SJeff Kirsher * 210adfc5217SJeff Kirsher * Takes into account current linespeed and MF configuration. 211adfc5217SJeff Kirsher */ 212adfc5217SJeff Kirsher u16 bnx2x_get_mf_speed(struct bnx2x *bp); 213adfc5217SJeff Kirsher 214adfc5217SJeff Kirsher /** 215adfc5217SJeff Kirsher * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler 216adfc5217SJeff Kirsher * 217adfc5217SJeff Kirsher * @irq: irq number 218adfc5217SJeff Kirsher * @dev_instance: private instance 219adfc5217SJeff Kirsher */ 220adfc5217SJeff Kirsher irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); 221adfc5217SJeff Kirsher 222adfc5217SJeff Kirsher /** 223adfc5217SJeff Kirsher * bnx2x_interrupt - non MSI-X interrupt handler 224adfc5217SJeff Kirsher * 225adfc5217SJeff Kirsher * @irq: irq number 226adfc5217SJeff Kirsher * @dev_instance: private instance 227adfc5217SJeff Kirsher */ 228adfc5217SJeff Kirsher irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); 229adfc5217SJeff Kirsher #ifdef BCM_CNIC 230adfc5217SJeff Kirsher 231adfc5217SJeff Kirsher /** 232adfc5217SJeff Kirsher * bnx2x_cnic_notify - send command to cnic driver 233adfc5217SJeff Kirsher * 234adfc5217SJeff Kirsher * @bp: driver handle 235adfc5217SJeff Kirsher * @cmd: command 236adfc5217SJeff Kirsher */ 237adfc5217SJeff Kirsher int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); 238adfc5217SJeff Kirsher 239adfc5217SJeff Kirsher /** 240adfc5217SJeff Kirsher * bnx2x_setup_cnic_irq_info - provides cnic with 
IRQ information 241adfc5217SJeff Kirsher * 242adfc5217SJeff Kirsher * @bp: driver handle 243adfc5217SJeff Kirsher */ 244adfc5217SJeff Kirsher void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); 245adfc5217SJeff Kirsher #endif 246adfc5217SJeff Kirsher 247adfc5217SJeff Kirsher /** 248adfc5217SJeff Kirsher * bnx2x_int_enable - enable HW interrupts. 249adfc5217SJeff Kirsher * 250adfc5217SJeff Kirsher * @bp: driver handle 251adfc5217SJeff Kirsher */ 252adfc5217SJeff Kirsher void bnx2x_int_enable(struct bnx2x *bp); 253adfc5217SJeff Kirsher 254adfc5217SJeff Kirsher /** 255adfc5217SJeff Kirsher * bnx2x_int_disable_sync - disable interrupts. 256adfc5217SJeff Kirsher * 257adfc5217SJeff Kirsher * @bp: driver handle 258adfc5217SJeff Kirsher * @disable_hw: true, disable HW interrupts. 259adfc5217SJeff Kirsher * 260adfc5217SJeff Kirsher * This function ensures that there are no 261adfc5217SJeff Kirsher * ISRs or SP DPCs (sp_task) are running after it returns. 262adfc5217SJeff Kirsher */ 263adfc5217SJeff Kirsher void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 264adfc5217SJeff Kirsher 265adfc5217SJeff Kirsher /** 266adfc5217SJeff Kirsher * bnx2x_nic_init - init driver internals. 267adfc5217SJeff Kirsher * 268adfc5217SJeff Kirsher * @bp: driver handle 269adfc5217SJeff Kirsher * @load_code: COMMON, PORT or FUNCTION 270adfc5217SJeff Kirsher * 271adfc5217SJeff Kirsher * Initializes: 272adfc5217SJeff Kirsher * - rings 273adfc5217SJeff Kirsher * - status blocks 274adfc5217SJeff Kirsher * - etc. 275adfc5217SJeff Kirsher */ 276adfc5217SJeff Kirsher void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); 277adfc5217SJeff Kirsher 278adfc5217SJeff Kirsher /** 279adfc5217SJeff Kirsher * bnx2x_alloc_mem - allocate driver's memory. 
280adfc5217SJeff Kirsher * 281adfc5217SJeff Kirsher * @bp: driver handle 282adfc5217SJeff Kirsher */ 283adfc5217SJeff Kirsher int bnx2x_alloc_mem(struct bnx2x *bp); 284adfc5217SJeff Kirsher 285adfc5217SJeff Kirsher /** 286adfc5217SJeff Kirsher * bnx2x_free_mem - release driver's memory. 287adfc5217SJeff Kirsher * 288adfc5217SJeff Kirsher * @bp: driver handle 289adfc5217SJeff Kirsher */ 290adfc5217SJeff Kirsher void bnx2x_free_mem(struct bnx2x *bp); 291adfc5217SJeff Kirsher 292adfc5217SJeff Kirsher /** 293adfc5217SJeff Kirsher * bnx2x_set_num_queues - set number of queues according to mode. 294adfc5217SJeff Kirsher * 295adfc5217SJeff Kirsher * @bp: driver handle 296adfc5217SJeff Kirsher */ 297adfc5217SJeff Kirsher void bnx2x_set_num_queues(struct bnx2x *bp); 298adfc5217SJeff Kirsher 299adfc5217SJeff Kirsher /** 300adfc5217SJeff Kirsher * bnx2x_chip_cleanup - cleanup chip internals. 301adfc5217SJeff Kirsher * 302adfc5217SJeff Kirsher * @bp: driver handle 303adfc5217SJeff Kirsher * @unload_mode: COMMON, PORT, FUNCTION 304adfc5217SJeff Kirsher * 305adfc5217SJeff Kirsher * - Cleanup MAC configuration. 306adfc5217SJeff Kirsher * - Closes clients. 307adfc5217SJeff Kirsher * - etc. 308adfc5217SJeff Kirsher */ 309adfc5217SJeff Kirsher void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); 310adfc5217SJeff Kirsher 311adfc5217SJeff Kirsher /** 312adfc5217SJeff Kirsher * bnx2x_acquire_hw_lock - acquire HW lock. 313adfc5217SJeff Kirsher * 314adfc5217SJeff Kirsher * @bp: driver handle 315adfc5217SJeff Kirsher * @resource: resource bit which was locked 316adfc5217SJeff Kirsher */ 317adfc5217SJeff Kirsher int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); 318adfc5217SJeff Kirsher 319adfc5217SJeff Kirsher /** 320adfc5217SJeff Kirsher * bnx2x_release_hw_lock - release HW lock. 
321adfc5217SJeff Kirsher * 322adfc5217SJeff Kirsher * @bp: driver handle 323adfc5217SJeff Kirsher * @resource: resource bit which was locked 324adfc5217SJeff Kirsher */ 325adfc5217SJeff Kirsher int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); 326adfc5217SJeff Kirsher 327adfc5217SJeff Kirsher /** 328adfc5217SJeff Kirsher * bnx2x_release_leader_lock - release recovery leader lock 329adfc5217SJeff Kirsher * 330adfc5217SJeff Kirsher * @bp: driver handle 331adfc5217SJeff Kirsher */ 332adfc5217SJeff Kirsher int bnx2x_release_leader_lock(struct bnx2x *bp); 333adfc5217SJeff Kirsher 334adfc5217SJeff Kirsher /** 335adfc5217SJeff Kirsher * bnx2x_set_eth_mac - configure eth MAC address in the HW 336adfc5217SJeff Kirsher * 337adfc5217SJeff Kirsher * @bp: driver handle 338adfc5217SJeff Kirsher * @set: set or clear 339adfc5217SJeff Kirsher * 340adfc5217SJeff Kirsher * Configures according to the value in netdev->dev_addr. 341adfc5217SJeff Kirsher */ 342adfc5217SJeff Kirsher int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); 343adfc5217SJeff Kirsher 344adfc5217SJeff Kirsher /** 345adfc5217SJeff Kirsher * bnx2x_set_rx_mode - set MAC filtering configurations. 346adfc5217SJeff Kirsher * 347adfc5217SJeff Kirsher * @dev: netdevice 348adfc5217SJeff Kirsher * 349adfc5217SJeff Kirsher * called with netif_tx_lock from dev_mcast.c 350adfc5217SJeff Kirsher * If bp->state is OPEN, should be called with 351adfc5217SJeff Kirsher * netif_addr_lock_bh() 352adfc5217SJeff Kirsher */ 353adfc5217SJeff Kirsher void bnx2x_set_rx_mode(struct net_device *dev); 354adfc5217SJeff Kirsher 355adfc5217SJeff Kirsher /** 356adfc5217SJeff Kirsher * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. 357adfc5217SJeff Kirsher * 358adfc5217SJeff Kirsher * @bp: driver handle 359adfc5217SJeff Kirsher * 360adfc5217SJeff Kirsher * If bp->state is OPEN, should be called with 361adfc5217SJeff Kirsher * netif_addr_lock_bh(). 
362adfc5217SJeff Kirsher */ 363adfc5217SJeff Kirsher void bnx2x_set_storm_rx_mode(struct bnx2x *bp); 364adfc5217SJeff Kirsher 365adfc5217SJeff Kirsher /** 366adfc5217SJeff Kirsher * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. 367adfc5217SJeff Kirsher * 368adfc5217SJeff Kirsher * @bp: driver handle 369adfc5217SJeff Kirsher * @cl_id: client id 370adfc5217SJeff Kirsher * @rx_mode_flags: rx mode configuration 371adfc5217SJeff Kirsher * @rx_accept_flags: rx accept configuration 372adfc5217SJeff Kirsher * @tx_accept_flags: tx accept configuration (tx switch) 373adfc5217SJeff Kirsher * @ramrod_flags: ramrod configuration 374adfc5217SJeff Kirsher */ 375adfc5217SJeff Kirsher void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, 376adfc5217SJeff Kirsher unsigned long rx_mode_flags, 377adfc5217SJeff Kirsher unsigned long rx_accept_flags, 378adfc5217SJeff Kirsher unsigned long tx_accept_flags, 379adfc5217SJeff Kirsher unsigned long ramrod_flags); 380adfc5217SJeff Kirsher 381adfc5217SJeff Kirsher /* Parity errors related */ 382889b9af3SAriel Elior void bnx2x_set_pf_load(struct bnx2x *bp); 383889b9af3SAriel Elior bool bnx2x_clear_pf_load(struct bnx2x *bp); 384adfc5217SJeff Kirsher bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); 385adfc5217SJeff Kirsher bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); 386adfc5217SJeff Kirsher void bnx2x_set_reset_in_progress(struct bnx2x *bp); 387adfc5217SJeff Kirsher void bnx2x_set_reset_global(struct bnx2x *bp); 388adfc5217SJeff Kirsher void bnx2x_disable_close_the_gate(struct bnx2x *bp); 389adfc5217SJeff Kirsher 390adfc5217SJeff Kirsher /** 391adfc5217SJeff Kirsher * bnx2x_sp_event - handle ramrods completion. 
392adfc5217SJeff Kirsher * 393adfc5217SJeff Kirsher * @fp: fastpath handle for the event 394adfc5217SJeff Kirsher * @rr_cqe: eth_rx_cqe 395adfc5217SJeff Kirsher */ 396adfc5217SJeff Kirsher void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); 397adfc5217SJeff Kirsher 398adfc5217SJeff Kirsher /** 399adfc5217SJeff Kirsher * bnx2x_ilt_set_info - prepare ILT configurations. 400adfc5217SJeff Kirsher * 401adfc5217SJeff Kirsher * @bp: driver handle 402adfc5217SJeff Kirsher */ 403adfc5217SJeff Kirsher void bnx2x_ilt_set_info(struct bnx2x *bp); 404adfc5217SJeff Kirsher 405adfc5217SJeff Kirsher /** 406adfc5217SJeff Kirsher * bnx2x_dcbx_init - initialize dcbx protocol. 407adfc5217SJeff Kirsher * 408adfc5217SJeff Kirsher * @bp: driver handle 409adfc5217SJeff Kirsher */ 410adfc5217SJeff Kirsher void bnx2x_dcbx_init(struct bnx2x *bp); 411adfc5217SJeff Kirsher 412adfc5217SJeff Kirsher /** 413adfc5217SJeff Kirsher * bnx2x_set_power_state - set power state to the requested value. 414adfc5217SJeff Kirsher * 415adfc5217SJeff Kirsher * @bp: driver handle 416adfc5217SJeff Kirsher * @state: required state D0 or D3hot 417adfc5217SJeff Kirsher * 418adfc5217SJeff Kirsher * Currently only D0 and D3hot are supported. 419adfc5217SJeff Kirsher */ 420adfc5217SJeff Kirsher int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); 421adfc5217SJeff Kirsher 422adfc5217SJeff Kirsher /** 423adfc5217SJeff Kirsher * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW. 
424adfc5217SJeff Kirsher * 425adfc5217SJeff Kirsher * @bp: driver handle 426adfc5217SJeff Kirsher * @value: new value 427adfc5217SJeff Kirsher */ 428adfc5217SJeff Kirsher void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); 429adfc5217SJeff Kirsher /* Error handling */ 430adfc5217SJeff Kirsher void bnx2x_panic_dump(struct bnx2x *bp); 431adfc5217SJeff Kirsher 432adfc5217SJeff Kirsher void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); 433adfc5217SJeff Kirsher 434adfc5217SJeff Kirsher /* dev_close main block */ 435adfc5217SJeff Kirsher int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); 436adfc5217SJeff Kirsher 437adfc5217SJeff Kirsher /* dev_open main block */ 438adfc5217SJeff Kirsher int bnx2x_nic_load(struct bnx2x *bp, int load_mode); 439adfc5217SJeff Kirsher 440adfc5217SJeff Kirsher /* hard_xmit callback */ 441adfc5217SJeff Kirsher netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); 442adfc5217SJeff Kirsher 443adfc5217SJeff Kirsher /* setup_tc callback */ 444adfc5217SJeff Kirsher int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); 445adfc5217SJeff Kirsher 446adfc5217SJeff Kirsher /* select_queue callback */ 447adfc5217SJeff Kirsher u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); 448adfc5217SJeff Kirsher 449adfc5217SJeff Kirsher /* reload helper */ 450adfc5217SJeff Kirsher int bnx2x_reload_if_running(struct net_device *dev); 451adfc5217SJeff Kirsher 452adfc5217SJeff Kirsher int bnx2x_change_mac_addr(struct net_device *dev, void *p); 453adfc5217SJeff Kirsher 454adfc5217SJeff Kirsher /* NAPI poll Rx part */ 455adfc5217SJeff Kirsher int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); 456adfc5217SJeff Kirsher 457adfc5217SJeff Kirsher void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, 458adfc5217SJeff Kirsher u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); 459adfc5217SJeff Kirsher 460adfc5217SJeff Kirsher /* NAPI poll Tx part */ 461adfc5217SJeff Kirsher int bnx2x_tx_int(struct 
bnx2x *bp, struct bnx2x_fp_txdata *txdata); 462adfc5217SJeff Kirsher 463adfc5217SJeff Kirsher /* suspend/resume callbacks */ 464adfc5217SJeff Kirsher int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); 465adfc5217SJeff Kirsher int bnx2x_resume(struct pci_dev *pdev); 466adfc5217SJeff Kirsher 467adfc5217SJeff Kirsher /* Release IRQ vectors */ 468adfc5217SJeff Kirsher void bnx2x_free_irq(struct bnx2x *bp); 469adfc5217SJeff Kirsher 470adfc5217SJeff Kirsher void bnx2x_free_fp_mem(struct bnx2x *bp); 471adfc5217SJeff Kirsher int bnx2x_alloc_fp_mem(struct bnx2x *bp); 472adfc5217SJeff Kirsher void bnx2x_init_rx_rings(struct bnx2x *bp); 473adfc5217SJeff Kirsher void bnx2x_free_skbs(struct bnx2x *bp); 474adfc5217SJeff Kirsher void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); 475adfc5217SJeff Kirsher void bnx2x_netif_start(struct bnx2x *bp); 476adfc5217SJeff Kirsher 477adfc5217SJeff Kirsher /** 478adfc5217SJeff Kirsher * bnx2x_enable_msix - set msix configuration. 479adfc5217SJeff Kirsher * 480adfc5217SJeff Kirsher * @bp: driver handle 481adfc5217SJeff Kirsher * 482adfc5217SJeff Kirsher * fills msix_table, requests vectors, updates num_queues 483adfc5217SJeff Kirsher * according to number of available vectors. 
484adfc5217SJeff Kirsher */ 485adfc5217SJeff Kirsher int bnx2x_enable_msix(struct bnx2x *bp); 486adfc5217SJeff Kirsher 487adfc5217SJeff Kirsher /** 488adfc5217SJeff Kirsher * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly 489adfc5217SJeff Kirsher * 490adfc5217SJeff Kirsher * @bp: driver handle 491adfc5217SJeff Kirsher */ 492adfc5217SJeff Kirsher int bnx2x_enable_msi(struct bnx2x *bp); 493adfc5217SJeff Kirsher 494adfc5217SJeff Kirsher /** 495adfc5217SJeff Kirsher * bnx2x_poll - NAPI callback 496adfc5217SJeff Kirsher * 497adfc5217SJeff Kirsher * @napi: napi structure 498adfc5217SJeff Kirsher * @budget: 499adfc5217SJeff Kirsher * 500adfc5217SJeff Kirsher */ 501adfc5217SJeff Kirsher int bnx2x_poll(struct napi_struct *napi, int budget); 502adfc5217SJeff Kirsher 503adfc5217SJeff Kirsher /** 504adfc5217SJeff Kirsher * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure 505adfc5217SJeff Kirsher * 506adfc5217SJeff Kirsher * @bp: driver handle 507adfc5217SJeff Kirsher */ 508adfc5217SJeff Kirsher int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); 509adfc5217SJeff Kirsher 510adfc5217SJeff Kirsher /** 511adfc5217SJeff Kirsher * bnx2x_free_mem_bp - release memories outsize main driver structure 512adfc5217SJeff Kirsher * 513adfc5217SJeff Kirsher * @bp: driver handle 514adfc5217SJeff Kirsher */ 515adfc5217SJeff Kirsher void bnx2x_free_mem_bp(struct bnx2x *bp); 516adfc5217SJeff Kirsher 517adfc5217SJeff Kirsher /** 518adfc5217SJeff Kirsher * bnx2x_change_mtu - change mtu netdev callback 519adfc5217SJeff Kirsher * 520adfc5217SJeff Kirsher * @dev: net device 521adfc5217SJeff Kirsher * @new_mtu: requested mtu 522adfc5217SJeff Kirsher * 523adfc5217SJeff Kirsher */ 524adfc5217SJeff Kirsher int bnx2x_change_mtu(struct net_device *dev, int new_mtu); 525adfc5217SJeff Kirsher 5263857e3eeSDmitry Kravkov #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 527adfc5217SJeff Kirsher /** 528adfc5217SJeff Kirsher * bnx2x_fcoe_get_wwn - return the 
requested WWN value for this port 529adfc5217SJeff Kirsher * 530adfc5217SJeff Kirsher * @dev: net_device 531adfc5217SJeff Kirsher * @wwn: output buffer 532adfc5217SJeff Kirsher * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port) 533adfc5217SJeff Kirsher * 534adfc5217SJeff Kirsher */ 535adfc5217SJeff Kirsher int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); 536adfc5217SJeff Kirsher #endif 537c8f44affSMichał Mirosław netdev_features_t bnx2x_fix_features(struct net_device *dev, 538c8f44affSMichał Mirosław netdev_features_t features); 539c8f44affSMichał Mirosław int bnx2x_set_features(struct net_device *dev, netdev_features_t features); 540adfc5217SJeff Kirsher 541adfc5217SJeff Kirsher /** 542adfc5217SJeff Kirsher * bnx2x_tx_timeout - tx timeout netdev callback 543adfc5217SJeff Kirsher * 544adfc5217SJeff Kirsher * @dev: net device 545adfc5217SJeff Kirsher */ 546adfc5217SJeff Kirsher void bnx2x_tx_timeout(struct net_device *dev); 547adfc5217SJeff Kirsher 548adfc5217SJeff Kirsher /*********************** Inlines **********************************/ 549adfc5217SJeff Kirsher /*********************** Fast path ********************************/ 550adfc5217SJeff Kirsher static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 551adfc5217SJeff Kirsher { 552adfc5217SJeff Kirsher barrier(); /* status block is written to by the chip */ 553adfc5217SJeff Kirsher fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; 554adfc5217SJeff Kirsher } 555adfc5217SJeff Kirsher 556adfc5217SJeff Kirsher static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, 557adfc5217SJeff Kirsher struct bnx2x_fastpath *fp, u16 bd_prod, 558adfc5217SJeff Kirsher u16 rx_comp_prod, u16 rx_sge_prod, u32 start) 559adfc5217SJeff Kirsher { 560adfc5217SJeff Kirsher struct ustorm_eth_rx_producers rx_prods = {0}; 561adfc5217SJeff Kirsher u32 i; 562adfc5217SJeff Kirsher 563adfc5217SJeff Kirsher /* Update producers */ 564adfc5217SJeff Kirsher rx_prods.bd_prod = bd_prod; 
565adfc5217SJeff Kirsher rx_prods.cqe_prod = rx_comp_prod; 566adfc5217SJeff Kirsher rx_prods.sge_prod = rx_sge_prod; 567adfc5217SJeff Kirsher 568adfc5217SJeff Kirsher /* 569adfc5217SJeff Kirsher * Make sure that the BD and SGE data is updated before updating the 570adfc5217SJeff Kirsher * producers since FW might read the BD/SGE right after the producer 571adfc5217SJeff Kirsher * is updated. 572adfc5217SJeff Kirsher * This is only applicable for weak-ordered memory model archs such 573adfc5217SJeff Kirsher * as IA-64. The following barrier is also mandatory since FW will 574adfc5217SJeff Kirsher * assumes BDs must have buffers. 575adfc5217SJeff Kirsher */ 576adfc5217SJeff Kirsher wmb(); 577adfc5217SJeff Kirsher 578adfc5217SJeff Kirsher for (i = 0; i < sizeof(rx_prods)/4; i++) 579adfc5217SJeff Kirsher REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); 580adfc5217SJeff Kirsher 581adfc5217SJeff Kirsher mmiowb(); /* keep prod updates ordered */ 582adfc5217SJeff Kirsher 583adfc5217SJeff Kirsher DP(NETIF_MSG_RX_STATUS, 584adfc5217SJeff Kirsher "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", 585adfc5217SJeff Kirsher fp->index, bd_prod, rx_comp_prod, rx_sge_prod); 586adfc5217SJeff Kirsher } 587adfc5217SJeff Kirsher 588adfc5217SJeff Kirsher static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, 589adfc5217SJeff Kirsher u8 segment, u16 index, u8 op, 590adfc5217SJeff Kirsher u8 update, u32 igu_addr) 591adfc5217SJeff Kirsher { 592adfc5217SJeff Kirsher struct igu_regular cmd_data = {0}; 593adfc5217SJeff Kirsher 594adfc5217SJeff Kirsher cmd_data.sb_id_and_flags = 595adfc5217SJeff Kirsher ((index << IGU_REGULAR_SB_INDEX_SHIFT) | 596adfc5217SJeff Kirsher (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 597adfc5217SJeff Kirsher (update << IGU_REGULAR_BUPDATE_SHIFT) | 598adfc5217SJeff Kirsher (op << IGU_REGULAR_ENABLE_INT_SHIFT)); 599adfc5217SJeff Kirsher 600adfc5217SJeff Kirsher DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", 601adfc5217SJeff Kirsher 
cmd_data.sb_id_and_flags, igu_addr); 602adfc5217SJeff Kirsher REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); 603adfc5217SJeff Kirsher 604adfc5217SJeff Kirsher /* Make sure that ACK is written */ 605adfc5217SJeff Kirsher mmiowb(); 606adfc5217SJeff Kirsher barrier(); 607adfc5217SJeff Kirsher } 608adfc5217SJeff Kirsher 609adfc5217SJeff Kirsher static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, 610adfc5217SJeff Kirsher u8 idu_sb_id, bool is_Pf) 611adfc5217SJeff Kirsher { 612adfc5217SJeff Kirsher u32 data, ctl, cnt = 100; 613adfc5217SJeff Kirsher u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 614adfc5217SJeff Kirsher u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 615adfc5217SJeff Kirsher u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 616adfc5217SJeff Kirsher u32 sb_bit = 1 << (idu_sb_id%32); 61723677ce3SJoe Perches u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 618adfc5217SJeff Kirsher u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 619adfc5217SJeff Kirsher 620adfc5217SJeff Kirsher /* Not supported in BC mode */ 621adfc5217SJeff Kirsher if (CHIP_INT_MODE_IS_BC(bp)) 622adfc5217SJeff Kirsher return; 623adfc5217SJeff Kirsher 624adfc5217SJeff Kirsher data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 625adfc5217SJeff Kirsher << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 626adfc5217SJeff Kirsher IGU_REGULAR_CLEANUP_SET | 627adfc5217SJeff Kirsher IGU_REGULAR_BCLEANUP; 628adfc5217SJeff Kirsher 629adfc5217SJeff Kirsher ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 630adfc5217SJeff Kirsher func_encode << IGU_CTRL_REG_FID_SHIFT | 631adfc5217SJeff Kirsher IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 632adfc5217SJeff Kirsher 633adfc5217SJeff Kirsher DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 634adfc5217SJeff Kirsher data, igu_addr_data); 635adfc5217SJeff Kirsher REG_WR(bp, igu_addr_data, data); 636adfc5217SJeff Kirsher mmiowb(); 637adfc5217SJeff Kirsher barrier(); 638adfc5217SJeff 
Kirsher DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 639adfc5217SJeff Kirsher ctl, igu_addr_ctl); 640adfc5217SJeff Kirsher REG_WR(bp, igu_addr_ctl, ctl); 641adfc5217SJeff Kirsher mmiowb(); 642adfc5217SJeff Kirsher barrier(); 643adfc5217SJeff Kirsher 644adfc5217SJeff Kirsher /* wait for clean up to finish */ 645adfc5217SJeff Kirsher while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 646adfc5217SJeff Kirsher msleep(20); 647adfc5217SJeff Kirsher 648adfc5217SJeff Kirsher 649adfc5217SJeff Kirsher if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 650adfc5217SJeff Kirsher DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " 651adfc5217SJeff Kirsher "idu_sb_id %d offset %d bit %d (cnt %d)\n", 652adfc5217SJeff Kirsher idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 653adfc5217SJeff Kirsher } 654adfc5217SJeff Kirsher } 655adfc5217SJeff Kirsher 656adfc5217SJeff Kirsher static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, 657adfc5217SJeff Kirsher u8 storm, u16 index, u8 op, u8 update) 658adfc5217SJeff Kirsher { 659adfc5217SJeff Kirsher u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 660adfc5217SJeff Kirsher COMMAND_REG_INT_ACK); 661adfc5217SJeff Kirsher struct igu_ack_register igu_ack; 662adfc5217SJeff Kirsher 663adfc5217SJeff Kirsher igu_ack.status_block_index = index; 664adfc5217SJeff Kirsher igu_ack.sb_id_and_flags = 665adfc5217SJeff Kirsher ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 666adfc5217SJeff Kirsher (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 667adfc5217SJeff Kirsher (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 668adfc5217SJeff Kirsher (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 669adfc5217SJeff Kirsher 670adfc5217SJeff Kirsher DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", 671adfc5217SJeff Kirsher (*(u32 *)&igu_ack), hc_addr); 672adfc5217SJeff Kirsher REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); 673adfc5217SJeff Kirsher 674adfc5217SJeff Kirsher /* Make sure that ACK is written */ 675adfc5217SJeff Kirsher mmiowb(); 
676adfc5217SJeff Kirsher barrier(); 677adfc5217SJeff Kirsher } 678adfc5217SJeff Kirsher 679adfc5217SJeff Kirsher static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, 680adfc5217SJeff Kirsher u16 index, u8 op, u8 update) 681adfc5217SJeff Kirsher { 682adfc5217SJeff Kirsher if (bp->common.int_block == INT_BLOCK_HC) 683adfc5217SJeff Kirsher bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); 684adfc5217SJeff Kirsher else { 685adfc5217SJeff Kirsher u8 segment; 686adfc5217SJeff Kirsher 687adfc5217SJeff Kirsher if (CHIP_INT_MODE_IS_BC(bp)) 688adfc5217SJeff Kirsher segment = storm; 689adfc5217SJeff Kirsher else if (igu_sb_id != bp->igu_dsb_id) 690adfc5217SJeff Kirsher segment = IGU_SEG_ACCESS_DEF; 691adfc5217SJeff Kirsher else if (storm == ATTENTION_ID) 692adfc5217SJeff Kirsher segment = IGU_SEG_ACCESS_ATTN; 693adfc5217SJeff Kirsher else 694adfc5217SJeff Kirsher segment = IGU_SEG_ACCESS_DEF; 695adfc5217SJeff Kirsher bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update); 696adfc5217SJeff Kirsher } 697adfc5217SJeff Kirsher } 698adfc5217SJeff Kirsher 699adfc5217SJeff Kirsher static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) 700adfc5217SJeff Kirsher { 701adfc5217SJeff Kirsher u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 702adfc5217SJeff Kirsher COMMAND_REG_SIMD_MASK); 703adfc5217SJeff Kirsher u32 result = REG_RD(bp, hc_addr); 704adfc5217SJeff Kirsher 705adfc5217SJeff Kirsher DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", 706adfc5217SJeff Kirsher result, hc_addr); 707adfc5217SJeff Kirsher 708adfc5217SJeff Kirsher barrier(); 709adfc5217SJeff Kirsher return result; 710adfc5217SJeff Kirsher } 711adfc5217SJeff Kirsher 712adfc5217SJeff Kirsher static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) 713adfc5217SJeff Kirsher { 714adfc5217SJeff Kirsher u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); 715adfc5217SJeff Kirsher u32 result = REG_RD(bp, igu_addr); 716adfc5217SJeff Kirsher 717adfc5217SJeff Kirsher 
	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

/* Read-and-acknowledge pending interrupts on whichever interrupt block
 * (HC or IGU) this chip uses.
 */
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

/* Non-zero while the Tx queue still has packets in flight (used when
 * draining a queue on unload).
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

/* Number of Tx BDs still available on @txdata's ring.  The NUM_TX_RINGS
 * "next-page" entries are counted as used so they are never handed out.
 */
static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > bp->tx_ring_size);
	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(bp->tx_ring_size) - used;
}

/* Non-zero when the status block shows completions we have not consumed. */
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

/* True if any CoS Tx queue of this fastpath has pending completions. */
static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
			return true;
	return false;
}

/* Non-zero when the Rx completion ring has unprocessed CQEs.  The SB
 * consumer is bumped past "next-page" positions before comparing.
 */
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

/* Unmap and free one SGE page at @index; "next page" slots (NULL page)
 * are skipped.
 */
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

/* Register one NAPI context per Rx queue. */
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

/* Unregister the per-Rx-queue NAPI contexts. */
static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

/* Release MSI-X or MSI vectors, whichever mode is active, and clear the
 * corresponding flag.
 */
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

/* Number of queues: module parameter if set, else one per online CPU,
 * both capped by the chip maximum.
 */
static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return  num_queues ?
		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}

/* Clear the two mask bits per SGE page that correspond to the "next page"
 * elements, so they are never treated as pending SGEs.
 */
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations.
	 */
	bnx2x_clear_sge_mask_next_elems(fp);
}

/* Allocate and DMA-map one SGE page cluster at ring slot @index.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure
 * (GFP_ATOMIC: callable from the Rx path).
 */
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* Allocate and DMA-map one Rx data buffer at ring slot @index.  Only the
 * payload area past NET_SKB_PAD is mapped; the skb is built around the
 * buffer later.  Returns 0 on success, -ENOMEM on failure.
 */
static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
				      struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				       u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->data = cons_rx_buf->data;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	/* CoS scheduling mode differs per chip family */
	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
		start_params->network_cos_mode = STATIC_COS;
	else /* CHIP_IS_E1X */
		start_params->network_cos_mode = FW_WRR;

	return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 *
 * The firmware expects each 16-bit word byte-swapped relative to the
 * wire-order MAC bytes.
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}

/* Free the first @last SGE pages of a fastpath; no-op when TPA is off. */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

/* Release the first @last TPA aggregation buffers.  A bin still in
 * BNX2X_TPA_START state holds a live DMA mapping that must be unmapped
 * before the buffer is freed.
 */
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

/* Chain the Tx BD ring's "next page" descriptors and reset all ring
 * indices and the doorbell header for one txdata.
 */
static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

/* Initialize every CoS ring of every Tx queue. */
static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}

/* Chain the Rx BD ring pages: the last two descriptors of each page point
 * at the next page.
 */
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

/* Chain the SGE ring pages the same way. */
static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

/* Chain the Rx completion queue pages via their next-page CQEs. */
static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;
	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fo init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
	}

	if (fp->eth_q_stats.rx_skb_alloc_failed)
		BNX2X_ERR("was only able to allocate "
			  "%d rx skbs on queue[%d]\n",
			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i - fp->eth_q_stats.rx_skb_alloc_failed;
}

/* Statistics ID are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	if (!CHIP_IS_E1x(fp->bp))
		return fp->cl_id;
	else
		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
}

/* Initialize the MAC classification object for this fastpath. */
static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:		driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate a number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

/* Set up the slowpath objects (rx-mode, mcast, MAC credit pool, RSS)
 * shared across the device.
 */
static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

/* FW queue-zone id: per-port offset on E1x, plain client id otherwise. */
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

/* Offset of the USTORM Rx producers for this fastpath's client. */
static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

/* Bind one txdata to its CID, netdev Tx queue index and SB consumer. */
static inline void bnx2x_init_txdata(struct bnx2x *bp,
	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
	__le16 *tx_cons_sb)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;

	DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

#ifdef BCM_CNIC
/* Ethernet client id reserved for CNIC, offset per PF pair. */
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{

	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}


static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	/** Current BNX2X_FCOE_ETH_CID definition implies not more than
	 * 16 ETH clients per function when CNIC is enabled!
	 *
	 * Fix it ASAP!!!
1330adfc5217SJeff Kirsher */ 1331adfc5217SJeff Kirsher bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; 1332adfc5217SJeff Kirsher bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; 1333adfc5217SJeff Kirsher bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; 1334adfc5217SJeff Kirsher bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; 1335adfc5217SJeff Kirsher 1336adfc5217SJeff Kirsher bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), 1337adfc5217SJeff Kirsher fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); 1338adfc5217SJeff Kirsher 133994f05b0fSJoe Perches DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index); 1340adfc5217SJeff Kirsher 1341adfc5217SJeff Kirsher /* qZone id equals to FW (per path) client id */ 1342adfc5217SJeff Kirsher bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); 1343adfc5217SJeff Kirsher /* init shortcut */ 1344adfc5217SJeff Kirsher bnx2x_fcoe(bp, ustorm_rx_prods_offset) = 1345adfc5217SJeff Kirsher bnx2x_rx_ustorm_prods_offset(fp); 1346adfc5217SJeff Kirsher 1347adfc5217SJeff Kirsher /* Configure Queue State object */ 1348adfc5217SJeff Kirsher __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 1349adfc5217SJeff Kirsher __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 1350adfc5217SJeff Kirsher 1351adfc5217SJeff Kirsher /* No multi-CoS for FCoE L2 client */ 1352adfc5217SJeff Kirsher BUG_ON(fp->max_cos != 1); 1353adfc5217SJeff Kirsher 1354adfc5217SJeff Kirsher bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, 1355adfc5217SJeff Kirsher BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 1356adfc5217SJeff Kirsher bnx2x_sp_mapping(bp, q_rdata), q_type); 1357adfc5217SJeff Kirsher 1358adfc5217SJeff Kirsher DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " 1359adfc5217SJeff Kirsher "igu_sb %d\n", 1360adfc5217SJeff Kirsher fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 1361adfc5217SJeff Kirsher fp->igu_sb_id); 1362adfc5217SJeff Kirsher } 1363adfc5217SJeff Kirsher #endif 1364adfc5217SJeff Kirsher 1365adfc5217SJeff Kirsher static inline int 
bnx2x_clean_tx_queue(struct bnx2x *bp, 1366adfc5217SJeff Kirsher struct bnx2x_fp_txdata *txdata) 1367adfc5217SJeff Kirsher { 1368adfc5217SJeff Kirsher int cnt = 1000; 1369adfc5217SJeff Kirsher 1370adfc5217SJeff Kirsher while (bnx2x_has_tx_work_unload(txdata)) { 1371adfc5217SJeff Kirsher if (!cnt) { 1372adfc5217SJeff Kirsher BNX2X_ERR("timeout waiting for queue[%d]: " 1373adfc5217SJeff Kirsher "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", 1374adfc5217SJeff Kirsher txdata->txq_index, txdata->tx_pkt_prod, 1375adfc5217SJeff Kirsher txdata->tx_pkt_cons); 1376adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR 1377adfc5217SJeff Kirsher bnx2x_panic(); 1378adfc5217SJeff Kirsher return -EBUSY; 1379adfc5217SJeff Kirsher #else 1380adfc5217SJeff Kirsher break; 1381adfc5217SJeff Kirsher #endif 1382adfc5217SJeff Kirsher } 1383adfc5217SJeff Kirsher cnt--; 1384adfc5217SJeff Kirsher usleep_range(1000, 1000); 1385adfc5217SJeff Kirsher } 1386adfc5217SJeff Kirsher 1387adfc5217SJeff Kirsher return 0; 1388adfc5217SJeff Kirsher } 1389adfc5217SJeff Kirsher 1390adfc5217SJeff Kirsher int bnx2x_get_link_cfg_idx(struct bnx2x *bp); 1391adfc5217SJeff Kirsher 1392adfc5217SJeff Kirsher static inline void __storm_memset_struct(struct bnx2x *bp, 1393adfc5217SJeff Kirsher u32 addr, size_t size, u32 *data) 1394adfc5217SJeff Kirsher { 1395adfc5217SJeff Kirsher int i; 1396adfc5217SJeff Kirsher for (i = 0; i < size/4; i++) 1397adfc5217SJeff Kirsher REG_WR(bp, addr + (i * 4), data[i]); 1398adfc5217SJeff Kirsher } 1399adfc5217SJeff Kirsher 1400adfc5217SJeff Kirsher static inline void storm_memset_func_cfg(struct bnx2x *bp, 1401adfc5217SJeff Kirsher struct tstorm_eth_function_common_config *tcfg, 1402adfc5217SJeff Kirsher u16 abs_fid) 1403adfc5217SJeff Kirsher { 1404adfc5217SJeff Kirsher size_t size = sizeof(struct tstorm_eth_function_common_config); 1405adfc5217SJeff Kirsher 1406adfc5217SJeff Kirsher u32 addr = BAR_TSTRORM_INTMEM + 1407adfc5217SJeff Kirsher 
TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); 1408adfc5217SJeff Kirsher 1409adfc5217SJeff Kirsher __storm_memset_struct(bp, addr, size, (u32 *)tcfg); 1410adfc5217SJeff Kirsher } 1411adfc5217SJeff Kirsher 1412adfc5217SJeff Kirsher static inline void storm_memset_cmng(struct bnx2x *bp, 1413adfc5217SJeff Kirsher struct cmng_struct_per_port *cmng, 1414adfc5217SJeff Kirsher u8 port) 1415adfc5217SJeff Kirsher { 1416adfc5217SJeff Kirsher size_t size = sizeof(struct cmng_struct_per_port); 1417adfc5217SJeff Kirsher 1418adfc5217SJeff Kirsher u32 addr = BAR_XSTRORM_INTMEM + 1419adfc5217SJeff Kirsher XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 1420adfc5217SJeff Kirsher 1421adfc5217SJeff Kirsher __storm_memset_struct(bp, addr, size, (u32 *)cmng); 1422adfc5217SJeff Kirsher } 1423adfc5217SJeff Kirsher 1424adfc5217SJeff Kirsher /** 1425adfc5217SJeff Kirsher * bnx2x_wait_sp_comp - wait for the outstanding SP commands. 1426adfc5217SJeff Kirsher * 1427adfc5217SJeff Kirsher * @bp: driver handle 1428adfc5217SJeff Kirsher * @mask: bits that need to be cleared 1429adfc5217SJeff Kirsher */ 1430adfc5217SJeff Kirsher static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) 1431adfc5217SJeff Kirsher { 1432adfc5217SJeff Kirsher int tout = 5000; /* Wait for 5 secs tops */ 1433adfc5217SJeff Kirsher 1434adfc5217SJeff Kirsher while (tout--) { 1435adfc5217SJeff Kirsher smp_mb(); 1436adfc5217SJeff Kirsher netif_addr_lock_bh(bp->dev); 1437adfc5217SJeff Kirsher if (!(bp->sp_state & mask)) { 1438adfc5217SJeff Kirsher netif_addr_unlock_bh(bp->dev); 1439adfc5217SJeff Kirsher return true; 1440adfc5217SJeff Kirsher } 1441adfc5217SJeff Kirsher netif_addr_unlock_bh(bp->dev); 1442adfc5217SJeff Kirsher 1443adfc5217SJeff Kirsher usleep_range(1000, 1000); 1444adfc5217SJeff Kirsher } 1445adfc5217SJeff Kirsher 1446adfc5217SJeff Kirsher smp_mb(); 1447adfc5217SJeff Kirsher 1448adfc5217SJeff Kirsher netif_addr_lock_bh(bp->dev); 1449adfc5217SJeff Kirsher if (bp->sp_state & mask) { 1450adfc5217SJeff 
Kirsher BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, " 1451adfc5217SJeff Kirsher "mask 0x%lx\n", bp->sp_state, mask); 1452adfc5217SJeff Kirsher netif_addr_unlock_bh(bp->dev); 1453adfc5217SJeff Kirsher return false; 1454adfc5217SJeff Kirsher } 1455adfc5217SJeff Kirsher netif_addr_unlock_bh(bp->dev); 1456adfc5217SJeff Kirsher 1457adfc5217SJeff Kirsher return true; 1458adfc5217SJeff Kirsher } 1459adfc5217SJeff Kirsher 1460adfc5217SJeff Kirsher /** 1461adfc5217SJeff Kirsher * bnx2x_set_ctx_validation - set CDU context validation values 1462adfc5217SJeff Kirsher * 1463adfc5217SJeff Kirsher * @bp: driver handle 1464adfc5217SJeff Kirsher * @cxt: context of the connection on the host memory 1465adfc5217SJeff Kirsher * @cid: SW CID of the connection to be configured 1466adfc5217SJeff Kirsher */ 1467adfc5217SJeff Kirsher void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, 1468adfc5217SJeff Kirsher u32 cid); 1469adfc5217SJeff Kirsher 1470adfc5217SJeff Kirsher void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, 1471adfc5217SJeff Kirsher u8 sb_index, u8 disable, u16 usec); 1472adfc5217SJeff Kirsher void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1473adfc5217SJeff Kirsher void bnx2x_release_phy_lock(struct bnx2x *bp); 1474adfc5217SJeff Kirsher 1475adfc5217SJeff Kirsher /** 1476adfc5217SJeff Kirsher * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. 
1477adfc5217SJeff Kirsher * 1478adfc5217SJeff Kirsher * @bp: driver handle 1479adfc5217SJeff Kirsher * @mf_cfg: MF configuration 1480adfc5217SJeff Kirsher * 1481adfc5217SJeff Kirsher */ 1482adfc5217SJeff Kirsher static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) 1483adfc5217SJeff Kirsher { 1484adfc5217SJeff Kirsher u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1485adfc5217SJeff Kirsher FUNC_MF_CFG_MAX_BW_SHIFT; 1486adfc5217SJeff Kirsher if (!max_cfg) { 148796b0accbSMichal Schmidt DP(NETIF_MSG_LINK, 148896b0accbSMichal Schmidt "Max BW configured to 0 - using 100 instead\n"); 1489adfc5217SJeff Kirsher max_cfg = 100; 1490adfc5217SJeff Kirsher } 1491adfc5217SJeff Kirsher return max_cfg; 1492adfc5217SJeff Kirsher } 1493adfc5217SJeff Kirsher 1494b306f5edSDmitry Kravkov /** 14951355b704SMintz Yuval * bnx2x_bz_fp - zero content of the fastpath structure. 14961355b704SMintz Yuval * 14971355b704SMintz Yuval * @bp: driver handle 14981355b704SMintz Yuval * @index: fastpath index to be zeroed 14991355b704SMintz Yuval * 15001355b704SMintz Yuval * Makes sure the contents of the bp->fp[index].napi is kept 15011355b704SMintz Yuval * intact. 
15021355b704SMintz Yuval */ 15031355b704SMintz Yuval static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) 15041355b704SMintz Yuval { 15051355b704SMintz Yuval struct bnx2x_fastpath *fp = &bp->fp[index]; 15061355b704SMintz Yuval struct napi_struct orig_napi = fp->napi; 15071355b704SMintz Yuval /* bzero bnx2x_fastpath contents */ 15081355b704SMintz Yuval if (bp->stats_init) 15091355b704SMintz Yuval memset(fp, 0, sizeof(*fp)); 15101355b704SMintz Yuval else { 15111355b704SMintz Yuval /* Keep Queue statistics */ 15121355b704SMintz Yuval struct bnx2x_eth_q_stats *tmp_eth_q_stats; 15131355b704SMintz Yuval struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; 15141355b704SMintz Yuval 15151355b704SMintz Yuval tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), 15161355b704SMintz Yuval GFP_KERNEL); 15171355b704SMintz Yuval if (tmp_eth_q_stats) 15181355b704SMintz Yuval memcpy(tmp_eth_q_stats, &fp->eth_q_stats, 15191355b704SMintz Yuval sizeof(struct bnx2x_eth_q_stats)); 15201355b704SMintz Yuval 15211355b704SMintz Yuval tmp_eth_q_stats_old = 15221355b704SMintz Yuval kzalloc(sizeof(struct bnx2x_eth_q_stats_old), 15231355b704SMintz Yuval GFP_KERNEL); 15241355b704SMintz Yuval if (tmp_eth_q_stats_old) 15251355b704SMintz Yuval memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old, 15261355b704SMintz Yuval sizeof(struct bnx2x_eth_q_stats_old)); 15271355b704SMintz Yuval 15281355b704SMintz Yuval memset(fp, 0, sizeof(*fp)); 15291355b704SMintz Yuval 15301355b704SMintz Yuval if (tmp_eth_q_stats) { 15311355b704SMintz Yuval memcpy(&fp->eth_q_stats, tmp_eth_q_stats, 15321355b704SMintz Yuval sizeof(struct bnx2x_eth_q_stats)); 15331355b704SMintz Yuval kfree(tmp_eth_q_stats); 15341355b704SMintz Yuval } 15351355b704SMintz Yuval 15361355b704SMintz Yuval if (tmp_eth_q_stats_old) { 15371355b704SMintz Yuval memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old, 15381355b704SMintz Yuval sizeof(struct bnx2x_eth_q_stats_old)); 15391355b704SMintz Yuval kfree(tmp_eth_q_stats_old); 15401355b704SMintz 
Yuval } 15411355b704SMintz Yuval 15421355b704SMintz Yuval } 15431355b704SMintz Yuval 15441355b704SMintz Yuval /* Restore the NAPI object as it has been already initialized */ 15451355b704SMintz Yuval fp->napi = orig_napi; 15461355b704SMintz Yuval 15471355b704SMintz Yuval fp->bp = bp; 15481355b704SMintz Yuval fp->index = index; 15491355b704SMintz Yuval if (IS_ETH_FP(fp)) 15501355b704SMintz Yuval fp->max_cos = bp->max_cos; 15511355b704SMintz Yuval else 15521355b704SMintz Yuval /* Special queues support only one CoS */ 15531355b704SMintz Yuval fp->max_cos = 1; 15541355b704SMintz Yuval 15551355b704SMintz Yuval /* 15561355b704SMintz Yuval * set the tpa flag for each queue. The tpa flag determines the queue 15571355b704SMintz Yuval * minimal size so it must be set prior to queue memory allocation 15581355b704SMintz Yuval */ 15591355b704SMintz Yuval fp->disable_tpa = (bp->flags & TPA_ENABLE_FLAG) == 0; 15601355b704SMintz Yuval #ifdef BCM_CNIC 15611355b704SMintz Yuval /* We don't want TPA on an FCoE L2 ring */ 15621355b704SMintz Yuval if (IS_FCOE_FP(fp)) 15631355b704SMintz Yuval fp->disable_tpa = 1; 15641355b704SMintz Yuval #endif 15651355b704SMintz Yuval } 15661355b704SMintz Yuval 15671355b704SMintz Yuval /** 1568b306f5edSDmitry Kravkov * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 1569b306f5edSDmitry Kravkov * 1570b306f5edSDmitry Kravkov * @bp: driver handle 1571b306f5edSDmitry Kravkov * 1572b306f5edSDmitry Kravkov */ 1573b306f5edSDmitry Kravkov void bnx2x_get_iscsi_info(struct bnx2x *bp); 1574b306f5edSDmitry Kravkov 157500253a8cSDmitry Kravkov /* returns func by VN for current port */ 157600253a8cSDmitry Kravkov static inline int func_by_vn(struct bnx2x *bp, int vn) 157700253a8cSDmitry Kravkov { 157800253a8cSDmitry Kravkov return 2 * vn + BP_PORT(bp); 157900253a8cSDmitry Kravkov } 158000253a8cSDmitry Kravkov 158100253a8cSDmitry Kravkov /** 158200253a8cSDmitry Kravkov * bnx2x_link_sync_notify - send notification to other functions. 
 *
 * @bp:		driver handle
 *
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		/* skip our own VN - we only notify the others */
		if (vn == BP_VN(bp))
			continue;

		func = func_by_vn(bp, vn);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp:		driver handle
 * @flags:	flags to update
 * @set:	set or clear
 *
 * No-op when shmem2 lacks the drv_flags field. The read-modify-write
 * is bracketed by the DRV_FLAGS HW lock, presumably to serialize
 * against other functions sharing the shmem - confirm with callers.
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}

/* A MAC address is acceptable if it is a valid unicast address, or -
 * with CNIC in iSCSI storage-domain MF mode - the all-zero address.
 */
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
	if (is_valid_ether_addr(addr))
		return true;
#ifdef BCM_CNIC
	if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
		return true;
#endif
	return false;
}

#endif /* BNX2X_CMN_H */