/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
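/*
 * Worked example (illustrative): iwl-fh.h defines IWL_NUM_OF_TBS as the
 * number of TBs in a TFD (20 on this hardware generation), so after
 * reserving 3 entries as described above, up to 17 remain for SKB frags.
 */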
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
#define RX_LOW_WATERMARK 8

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for allocation
 * @write_actual: last write pointer value actually written to the HW
 *	register, rounded down to a multiple of 8
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's lists and indices
 * @pool: initial pool of iwl_rx_mem_buffer for the queue
 * @queue: actual rx queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
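/*
 * Minimal sketch (illustrative, not part of the driver API) of how a
 * replenish path could take a ready RBD off rx_free under rxq->lock;
 * the real logic lives in rx.c.  Assumes <linux/list.h> is visible via
 * the includes above.
 */
static inline struct iwl_rx_mem_buffer *
iwl_pcie_rxq_pop_free_example(struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb = NULL;

	spin_lock(&rxq->lock);
	if (!list_empty(&rxq->rx_free)) {
		rxb = list_first_entry(&rxq->rx_free,
				       struct iwl_rx_mem_buffer, list);
		list_del(&rxb->list);
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	return rxb;
}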
/**
 * struct iwl_rb_allocator - Rx allocator
 * @pool: initial pool of allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
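/*
 * Worked example (illustrative): TFD_QUEUE_SIZE_MAX is 256, so the mask
 * is 0xff and iwl_queue_inc_wrap(255) == 0 while iwl_queue_dec_wrap(0)
 * == 255.  The masking trick only works because TFD_QUEUE_SIZE_MAX is
 * a power of two.
 */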
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring future HW changes). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
 * the same struct) have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0      | ...    | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;       /* first empty entry (index) host_w */
	int read_ptr;        /* last used entry (index) host_r */
	/* used for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr; /* physical addr for BD's */
	int n_window;	     /* safe queue window */
	u32 id;
	int low_mark;	     /* low watermark, resume queue if free
			      * space more than this */
	int high_mark;       /* high watermark, stop queue if free
			      * space less than this */
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
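/*
 * Illustrative sketch of the window overlay described above: with the
 * command queue's n_window == TFD_CMD_SLOTS (32), HW indices 0, 32,
 * 64, ..., 224 all land on SW entry 0.  get_cmd_index() further below
 * performs exactly this masking; this helper exists only to make the
 * arithmetic explicit.
 */
static inline int iwl_pcie_hw_to_sw_index_example(int hw_index,
						  int n_window)
{
	/* valid only because n_window is a power of two */
	return hw_index & (n_window - 1);
}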
/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger, then allocations will be bigger and copies
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE	16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};
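/*
 * Layout sanity sketch (illustrative): struct iwl_cmd_header is 4
 * bytes, so hdr(4) + buf[8] + scratch(4) adds up to
 * IWL_HCMD_SCRATCHBUF_SIZE with the scratch at offset 12, as the
 * comment above requires.  A build-time check along these lines could
 * guard the layout, assuming BUILD_BUG_ON is visible via the kernel
 * includes; it is not part of this header's API.
 */
static inline void iwl_pcie_check_scratchbuf_layout_example(void)
{
	BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
		     IWL_HCMD_SCRATCHBUF_SIZE);
}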
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @block: set when TX on this queue should be blocked
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}
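/*
 * Worked example (illustrative): each scratch buffer is
 * sizeof(struct iwl_pcie_txq_scratch_buf) == 16 bytes, so entry
 * idx == 2 lives at scratchbufs_dma + 32.
 */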
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;
	struct napi_struct napi;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	u32 rx_page_order;

	const char *const *command_names;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
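/*
 * Usage note (illustrative): the two helpers above are inverses, so
 * iwl_trans_pcie_get_trans(IWL_TRANS_GET_PCIE_TRANS(trans)) == trans.
 * The PCIe-specific state is embedded in the trans_specific area at
 * the end of struct iwl_trans, which is what makes both conversions
 * simple pointer arithmetic.
 */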
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
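/*
 * Minimal sketch (illustrative, not driver code) of issuing a host
 * command through the declaration above.  The field names follow
 * struct iwl_host_cmd in iwl-trans.h; the command ID 0 is a
 * hypothetical stand-in for a real one.
 */
static inline int iwl_pcie_send_hcmd_example(struct iwl_trans *trans)
{
	struct iwl_host_cmd hcmd = {
		.id = 0,	/* hypothetical command ID */
	};

	return iwl_trans_pcie_send_hcmd(trans, &hcmd);
}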
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
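/*
 * Usage note (illustrative): when the NIC goes down with rfkill
 * asserted, the driver can first call iwl_disable_interrupts() to mask
 * and ack everything, then iwl_enable_rfkill_int() so that only the
 * CSR_INT_BIT_RF_KILL cause remains armed and the host still learns
 * when the kill switch changes state.
 */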
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}
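/*
 * Worked example (illustrative): with read_ptr == 250 and
 * write_ptr == 5 the queue has wrapped, so iwl_queue_used() reports
 * indices 250..255 and 0..4 as used while 5..249 are free;
 * get_cmd_index() above then folds any such index into the
 * n_window-sized software window.
 */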
static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
					 u8 cmd)
{
	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
		return "UNKNOWN";
	return trans_pcie->command_names[cmd];
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

#endif /* __iwl_trans_int_pcie_h__ */