/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

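/*
 * Illustrative example (not part of the original code): on a device whose
 * TFDs carry 20 TBs, i.e. trans_pcie->max_tbs == 20, the macro leaves
 * 20 - 3 == 17 TBs for skb frags:
 *
 *	unsigned int max_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);  // 17 here
 */
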
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry, used to link this rxb into the rx_free/rx_used or
 *	allocator lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 * @hw: number of hardware error interrupts
 * @sw: number of software (uCode) error interrupts
 * @err_code: error code reported by the last SW error interrupt
 * @sch: number of scheduler interrupts
 * @alive: number of alive interrupts
 * @rfkill: number of RF-kill interrupts
 * @ctkill: number of critical temperature interrupts
 * @wakeup: number of wakeup interrupts
 * @rx: number of Rx interrupts
 * @tx: number of Tx interrupts
 * @unhandled: number of unhandled interrupts
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer
 *	descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to allocator to use for allocation
 * @write_actual: last write index actually written to the device
 * @queue_size: size of this queue, in RBDs
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue data
 * @napi: NAPI struct for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

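/*
 * Minimal usage sketch for &struct iwl_dma_ptr (illustrative only; the
 * size here is made up), using the iwl_pcie_alloc_dma_ptr() and
 * iwl_pcie_free_dma_ptr() helpers declared later in this header:
 *
 *	struct iwl_dma_ptr ptr;
 *
 *	if (iwl_pcie_alloc_dma_ptr(trans, &ptr, 4096))
 *		return -ENOMEM;
 *	// hand ptr.dma to the device, access the memory through ptr.addr
 *	iwl_pcie_free_dma_ptr(trans, &ptr);
 */
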
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}

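/*
 * Example of the wrap-around behaviour: TFD_QUEUE_SIZE_MAX is 256, a power
 * of two, so masking with (TFD_QUEUE_SIZE_MAX - 1) wraps the index:
 *
 *	iwl_queue_inc_wrap(255);	// == 0
 *	iwl_queue_dec_wrap(0);		// == 255
 */
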
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger, allocations will be bigger and copies slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

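/*
 * Worked example: ALIGN(20, 64) rounds IWL_FIRST_TB_SIZE up to 64, so each
 * entry of the first-TB array below occupies exactly one 64-byte chunk.
 */
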
struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @overflow_q: overflow queue for skbs that couldn't be sent while the
 *	queue was full
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) -- the host's write pointer (host_w)
 * @read_ptr: last used entry (index) -- the host's read pointer (host_r)
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * future HW changes this). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

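/*
 * For example, with IWL_FIRST_TB_SIZE_ALIGN == 64, the first TB of entry
 * idx == 3 lives at first_tb_dma + 3 * 64 == first_tb_dma + 192.
 */
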
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non-RX causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * struct iwl_dram_data
 * @physical: bus (DMA) address of the block/page
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in the fw array
 * @paging: paging dram data
 * @paging_cnt: total number of items in the paging array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @tx_cmd_queue_size: the size of the tx command queue
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_context_info *ctxt_info;
	dma_addr_t ctxt_info_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}

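/*
 * In the legacy TFD layout, hi_n_len packs the upper 4 bits of the DMA
 * address (bits 0-3) together with the 12-bit TB length (bits 4-15), hence
 * the ">> 4" above. E.g. hi_n_len == cpu_to_le16(0x0145) decodes to a
 * length of 0x14 (20 bytes) with address bits [35:32] == 0x5.
 */
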
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its bit
		 * is unset in the mask register.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

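/*
 * Note the inverted semantics above: the MSI-X mask registers hold the
 * *masked* causes, while fh_mask/hw_mask track the *unmasked* ones. For
 * example, with fh_mask == 0x3, ~0x3 is written to CSR_MSIX_FH_INT_MASK_AD
 * and only FH causes 0 and 1 can fire.
 */
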
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

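/*
 * Since n_window is a power of two, this is a cheap modulo. For the
 * command queue (n_window == TFD_CMD_SLOTS == 32), HW index 100 maps to
 * SW entry 100 & 31 == 4, matching the window overlay described at
 * &struct iwl_txq.
 */
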
static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

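/*
 * Example: with read_ptr == 250 and write_ptr == 5 the queue has wrapped,
 * so entries 250..255 and 0..4 are used while 5..249 are free; the second
 * branch above handles exactly this wrapped case.
 */
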
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

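/*
 * Usage sketch (register and values made up for illustration): do a
 * read-modify-write that sets bits 5:4 of a register to 0b01, leaving all
 * other bits intact:
 *
 *	__iwl_trans_pcie_set_bits_mask(trans, reg, 0x30, 0x10);
 */
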
static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

795 void iwl_pcie_apm_config(struct iwl_trans *trans);
796 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
797 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
798 bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
799 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
800 				       bool was_in_rfkill);
801 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
802 int iwl_queue_space(const struct iwl_txq *q);
803 void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
804 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
805 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
806 		      int slots_num, bool cmd_queue);
807 int iwl_pcie_txq_alloc(struct iwl_trans *trans,
808 		       struct iwl_txq *txq, int slots_num,  bool cmd_queue);
809 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
810 			   struct iwl_dma_ptr *ptr, size_t size);
811 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
812 void iwl_pcie_apply_destination(struct iwl_trans *trans);
813 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
814 			    struct sk_buff *skb);
815 #ifdef CONFIG_INET
816 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
817 #endif
818 
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */