/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)

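/*
 * Illustrative compile-time guard (a sketch, not from the driver; assumes
 * BUILD_BUG_ON() is visible via the kernel headers already included):
 * after reserving 2 TBs for the command/header and 1 for the SKB head,
 * at least one TB must remain for fragments.
 */
static inline void iwl_pcie_max_frags_sanity_check(void)
{
	BUILD_BUG_ON(IWL_PCIE_MAX_FRAGS < 1);
}
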
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
#define RX_LOW_WATERMARK 8

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 * @hw: HW error interrupts
 * @sw: uCode (software) error interrupts
 * @err_code: last uCode error code
 * @sch: scheduler interrupts
 * @alive: alive notification interrupts
 * @rfkill: RF-kill toggle interrupts
 * @ctkill: critical temperature (CT-kill) interrupts
 * @wakeup: wakeup interrupts
 * @rx: Rx interrupts
 * @tx: Tx interrupts
 * @unhandled: interrupts that matched none of the above
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for page allocation
 * @write_actual: last write pointer actually written to the device,
 *	aligned to 8
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue state, including the lists above
 * @pool: initial pool of iwl_rx_mem_buffer for the queue
 * @queue: actual rx queue
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @pool: initial pool of allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}

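/*
 * The mask arithmetic in the two wrap helpers above only works because
 * TFD_QUEUE_SIZE_MAX is a power of two. A compile-time guard along these
 * lines (an illustrative sketch, not part of the driver) would catch a
 * bad redefinition:
 */
static inline void iwl_queue_wrap_sanity_check(void)
{
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
}
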
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring future HW changes). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
 * the same struct) have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;       /* first empty entry (index) host_w */
	int read_ptr;        /* last used entry (index) host_r */
	/* used for monitoring and recovering a stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;
	int low_mark;	       /* low watermark, resume queue if free
				* space more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space less than this */
};

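/*
 * Since write_ptr and read_ptr wrap with the helpers above, the number of
 * entries currently in use follows from the same modular arithmetic. A
 * minimal illustrative helper (a sketch, not part of the driver API):
 */
static inline int iwl_queue_used_count(const struct iwl_queue *q)
{
	return (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
}
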
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * made it bigger, allocations would be bigger and copying
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE	16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};

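/*
 * A compile-time layout check (an illustrative sketch, not part of the
 * driver): the scratch word must sit at byte offset 12 so the struct
 * spans exactly IWL_HCMD_SCRATCHBUF_SIZE bytes, as the comment above
 * IWL_HCMD_SCRATCHBUF_SIZE explains. Assumes BUILD_BUG_ON()/offsetof()
 * are visible via the kernel headers already included.
 */
static inline void iwl_pcie_scratchbuf_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
		     IWL_HCMD_SCRATCHBUF_SIZE);
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != 12);
}
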
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	unsigned long wd_timeout;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;
	struct napi_struct napi;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	u32 rx_page_order;

	const char *const *command_names;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

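/*
 * IWL_TRANS_GET_PCIE_TRANS() and iwl_trans_pcie_get_trans() are inverses:
 * the PCIe private area lives inside struct iwl_trans, so converting back
 * and forth must yield the original pointer. An illustrative sketch (not
 * part of the driver API):
 */
static inline bool iwl_trans_pcie_ptr_roundtrip_ok(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	return iwl_trans_pcie_get_trans(trans_pcie) == trans;
}
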
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
					 u8 cmd)
{
	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
		return "UNKNOWN";
	return trans_pcie->command_names[cmd];
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#endif /* __iwl_trans_int_pcie_h__ */