1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018 - 2019 Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
20  * more details.
21  *
22  * The full GNU General Public License is included in this distribution in the
23  * file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 - 2019 Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #ifndef __iwl_trans_int_pcie_h__
65 #define __iwl_trans_int_pcie_h__
66 
67 #include <linux/spinlock.h>
68 #include <linux/interrupt.h>
69 #include <linux/skbuff.h>
70 #include <linux/wait.h>
71 #include <linux/pci.h>
72 #include <linux/timer.h>
73 #include <linux/cpu.h>
74 
75 #include "iwl-fh.h"
76 #include "iwl-csr.h"
77 #include "iwl-trans.h"
78 #include "iwl-debug.h"
79 #include "iwl-io.h"
80 #include "iwl-op-mode.h"
81 #include "iwl-drv.h"
82 
83 /* We need 2 entries for the TX command and header, and another one might
84  * be needed for potential data in the SKB's head. The remaining ones can
85  * be used for frags.
86  */
87 #define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
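
/*
 * A worked example of the accounting above (the TB count is illustrative):
 * a TFH-based device with 25 TBs per TFD reserves two TBs for the TX
 * command and header plus one for data in the SKB's head, leaving
 * IWL_PCIE_MAX_FRAGS == 22 TBs for page fragments.
 */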
88 
89 /*
90  * RX related structures and functions
91  */
92 #define RX_NUM_QUEUES 1
93 #define RX_POST_REQ_ALLOC 2
94 #define RX_CLAIM_REQ_ALLOC 8
95 #define RX_PENDING_WATERMARK 16
96 #define FIRST_RX_QUEUE 512
97 
98 struct iwl_host_cmd;
99 
/*
 * This file includes the declarations that are internal to the
 * trans_pcie layer.
 */
102 
103 /**
104  * struct iwl_rx_mem_buffer
105  * @page_dma: bus address of rxb page
106  * @page: driver's pointer to the rxb page
107  * @invalid: rxb is in driver ownership - not owned by HW
108  * @vid: index of this rxb in the global table
109  */
110 struct iwl_rx_mem_buffer {
111 	dma_addr_t page_dma;
112 	struct page *page;
113 	u16 vid;
114 	bool invalid;
115 	struct list_head list;
116 };
117 
118 /**
119  * struct isr_statistics - interrupt statistics
120  *
121  */
122 struct isr_statistics {
123 	u32 hw;
124 	u32 sw;
125 	u32 err_code;
126 	u32 sch;
127 	u32 alive;
128 	u32 rfkill;
129 	u32 ctkill;
130 	u32 wakeup;
131 	u32 rx;
132 	u32 tx;
133 	u32 unhandled;
134 };
135 
136 /**
137  * struct iwl_rx_transfer_desc - transfer descriptor
138  * @addr: ptr to free buffer start address
139  * @rbid: unique tag of the buffer
140  * @reserved: reserved
141  */
142 struct iwl_rx_transfer_desc {
143 	__le16 rbid;
144 	__le16 reserved[3];
145 	__le64 addr;
146 } __packed;
147 
148 #define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)
149 
150 /**
151  * struct iwl_rx_completion_desc - completion descriptor
152  * @reserved1: reserved
153  * @rbid: unique tag of the received buffer
154  * @flags: flags (0: fragmented, all others: reserved)
155  * @reserved2: reserved
156  */
157 struct iwl_rx_completion_desc {
158 	__le32 reserved1;
159 	__le16 rbid;
160 	u8 flags;
161 	u8 reserved2[25];
162 } __packed;
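
/*
 * A minimal usage sketch (not driver code; names are illustrative): given a
 * completion descriptor @cd read back from the completion ring,
 *
 *	u16 vid = le16_to_cpu(cd->rbid);
 *	bool fragmented = cd->flags & IWL_RX_CD_FLAGS_FRAGMENTED;
 *
 * would recover the buffer tag and the fragmentation flag.
 */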
163 
164 /**
165  * struct iwl_rxq - Rx queue
166  * @id: queue index
167  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
168  *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
169  *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
170  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
171  * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
172  * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
173  * @tr_tail: driver's pointer to the transmission ring tail buffer
174  * @tr_tail_dma: physical address of the buffer for the transmission ring tail
175  * @cr_tail: driver's pointer to the completion ring tail buffer
176  * @cr_tail_dma: physical address of the buffer for the completion ring tail
177  * @read: Shared index to newest available Rx buffer
178  * @write: Shared index to oldest written Rx packet
179  * @free_count: Number of pre-allocated buffers in rx_free
180  * @used_count: Number of RBDs handled to allocator to use for allocation
181  * @write_actual:
182  * @rx_free: list of RBDs with allocated RB ready for use
183  * @rx_used: list of RBDs with no RB attached
184  * @need_update: flag to indicate we need to update read/write index
185  * @rb_stts: driver's pointer to receive buffer status
186  * @rb_stts_dma: bus address of receive buffer status
187  * @lock:
188  * @queue: actual rx queue. Not used for multi-rx queue.
189  *
190  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
191  */
192 struct iwl_rxq {
193 	int id;
194 	void *bd;
195 	dma_addr_t bd_dma;
196 	union {
197 		void *used_bd;
198 		__le32 *bd_32;
199 		struct iwl_rx_completion_desc *cd;
200 	};
201 	dma_addr_t used_bd_dma;
202 	__le16 *tr_tail;
203 	dma_addr_t tr_tail_dma;
204 	__le16 *cr_tail;
205 	dma_addr_t cr_tail_dma;
206 	u32 read;
207 	u32 write;
208 	u32 free_count;
209 	u32 used_count;
210 	u32 write_actual;
211 	u32 queue_size;
212 	struct list_head rx_free;
213 	struct list_head rx_used;
214 	bool need_update;
215 	void *rb_stts;
216 	dma_addr_t rb_stts_dma;
217 	spinlock_t lock;
218 	struct napi_struct napi;
219 	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
220 };
221 
222 /**
223  * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
225  * @req_ready: number of requests honored and ready for claiming
226  * @rbd_allocated: RBDs with pages allocated and ready to be handled to
227  *	the queue. This is a list of &struct iwl_rx_mem_buffer
228  * @rbd_empty: RBDs with no page attached for allocator use. This is a list
229  *	of &struct iwl_rx_mem_buffer
230  * @lock: protects the rbd_allocated and rbd_empty lists
231  * @alloc_wq: work queue for background calls
232  * @rx_alloc: work struct for background calls
233  */
234 struct iwl_rb_allocator {
235 	atomic_t req_pending;
236 	atomic_t req_ready;
237 	struct list_head rbd_allocated;
238 	struct list_head rbd_empty;
239 	spinlock_t lock;
240 	struct workqueue_struct *alloc_wq;
241 	struct work_struct rx_alloc;
242 };
243 
244 struct iwl_dma_ptr {
245 	dma_addr_t dma;
246 	void *addr;
247 	size_t size;
248 };
249 
250 /**
251  * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for the max TFD queue size)
 * @index: current index
253  */
254 static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
255 {
256 	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
257 }
258 
259 /**
260  * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport, used to pick the rb_stts layout by device family
 * @rxq: the rxq to get the rb stts from
262  */
263 static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
264 					    struct iwl_rxq *rxq)
265 {
266 	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
267 		__le16 *rb_stts = rxq->rb_stts;
268 
269 		return READ_ONCE(*rb_stts);
270 	} else {
271 		struct iwl_rb_status *rb_stts = rxq->rb_stts;
272 
273 		return READ_ONCE(rb_stts->closed_rb_num);
274 	}
275 }
276 
277 /**
278  * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for the max TFD queue size)
 * @index: current index
280  */
281 static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
282 {
283 	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
284 }
285 
286 struct iwl_cmd_meta {
287 	/* only for SYNC commands, iff the reply skb is wanted */
288 	struct iwl_host_cmd *source;
289 	u32 flags;
290 	u32 tbs;
291 };
292 
293 
294 #define TFD_TX_CMD_SLOTS 256
295 #define TFD_CMD_SLOTS 32
296 
297 /*
298  * The FH will write back to the first TB only, so we need to copy some data
299  * into the buffer regardless of whether it should be mapped or not.
300  * This indicates how big the first TB must be to include the scratch buffer
301  * and the assigned PN.
 * Since the PN occupies 8 bytes at offset 12, the size is 20 bytes.
 * If we make it bigger, then allocations will be bigger and copies slower, so
304  * that's probably not useful.
305  */
306 #define IWL_FIRST_TB_SIZE	20
307 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
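
/*
 * A sketch of the resulting first-TB layout, per the comment above:
 *   bytes  0..11: scratch area the FH writes back to
 *   bytes 12..19: the assigned PN
 * hence IWL_FIRST_TB_SIZE == 20, padded to a 64-byte buffer by
 * IWL_FIRST_TB_SIZE_ALIGN.
 */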
308 
309 struct iwl_pcie_txq_entry {
310 	struct iwl_device_cmd *cmd;
311 	struct sk_buff *skb;
312 	/* buffer to free after command completes */
313 	const void *free_buf;
314 	struct iwl_cmd_meta meta;
315 };
316 
317 struct iwl_pcie_first_tb_buf {
318 	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
319 };
320 
321 /**
322  * struct iwl_txq - Tx Queue for DMA
323  * @q: generic Rx/Tx queue descriptor
324  * @tfds: transmit frame descriptors (DMA memory)
325  * @first_tb_bufs: start of command headers, including scratch buffers, for
326  *	the writeback -- this is DMA memory and an array holding one buffer
327  *	for each command on the queue
328  * @first_tb_dma: DMA address for the first_tb_bufs start
329  * @entries: transmit entries (driver state)
330  * @lock: queue lock
331  * @stuck_timer: timer that fires if queue gets stuck
332  * @trans_pcie: pointer back to transport (for timer)
333  * @need_update: indicates need to update read/write index
334  * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
335  * @wd_timeout: queue watchdog timeout (jiffies) - per queue
336  * @frozen: tx stuck queue timer is frozen
337  * @frozen_expiry_remainder: remember how long until the timer fires
338  * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
339  * @write_ptr: 1-st empty entry (index) host_w
340  * @read_ptr: last used entry (index) host_r
341  * @dma_addr:  physical addr for BD's
342  * @n_window: safe queue window
343  * @id: queue id
344  * @low_mark: low watermark, resume queue if free space more than this
345  * @high_mark: high watermark, stop queue if free space less than this
346  *
347  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
348  * descriptors) and required locking structures.
349  *
350  * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
351  * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
352  * there might be HW changes in the future). For the normal TX
353  * queues, n_window, which is the size of the software queue data
354  * is also 256; however, for the command queue, n_window is only
355  * 32 since we don't need so many commands pending. Since the HW
356  * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
357  * This means that we end up with the following:
358  *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
359  *  SW entries:           | 0      | ... | 31          |
360  * where N is a number between 0 and 7. This means that the SW
361  * data is a window overlayed over the HW queue.
362  */
363 struct iwl_txq {
364 	void *tfds;
365 	struct iwl_pcie_first_tb_buf *first_tb_bufs;
366 	dma_addr_t first_tb_dma;
367 	struct iwl_pcie_txq_entry *entries;
368 	spinlock_t lock;
369 	unsigned long frozen_expiry_remainder;
370 	struct timer_list stuck_timer;
371 	struct iwl_trans_pcie *trans_pcie;
372 	bool need_update;
373 	bool frozen;
374 	bool ampdu;
375 	int block;
376 	unsigned long wd_timeout;
377 	struct sk_buff_head overflow_q;
378 	struct iwl_dma_ptr bc_tbl;
379 
380 	int write_ptr;
381 	int read_ptr;
382 	dma_addr_t dma_addr;
383 	int n_window;
384 	u32 id;
385 	int low_mark;
386 	int high_mark;
387 
388 	bool overflow_tx;
389 };
390 
391 static inline dma_addr_t
392 iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
393 {
394 	return txq->first_tb_dma +
395 	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
396 }
397 
398 struct iwl_tso_hdr_page {
399 	struct page *page;
400 	u8 *pos;
401 };
402 
403 #ifdef CONFIG_IWLWIFI_DEBUGFS
404 /**
405  * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
406  * debugfs file
407  *
408  * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
409  * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
410  * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
411  *	set the file can no longer be used.
412  */
413 enum iwl_fw_mon_dbgfs_state {
414 	IWL_FW_MON_DBGFS_STATE_CLOSED,
415 	IWL_FW_MON_DBGFS_STATE_OPEN,
416 	IWL_FW_MON_DBGFS_STATE_DISABLED,
417 };
418 #endif
419 
420 /**
421  * enum iwl_shared_irq_flags - level of sharing for irq
422  * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
423  * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
424  */
425 enum iwl_shared_irq_flags {
426 	IWL_SHARED_IRQ_NON_RX		= BIT(0),
427 	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
428 };
429 
430 /**
431  * enum iwl_image_response_code - image response values
432  * @IWL_IMAGE_RESP_DEF: the default value of the register
433  * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
434  * @IWL_IMAGE_RESP_FAIL: iml reading failed
435  */
436 enum iwl_image_response_code {
437 	IWL_IMAGE_RESP_DEF		= 0,
438 	IWL_IMAGE_RESP_SUCCESS		= 1,
439 	IWL_IMAGE_RESP_FAIL		= 2,
440 };
441 
442 /**
 * struct cont_rec - continuous recording data structure
444  * @prev_wr_ptr: the last address that was read in monitor_data
445  *	debugfs file
446  * @prev_wrap_cnt: the wrap count that was used during the last read in
447  *	monitor_data debugfs file
448  * @state: the state of monitor_data debugfs file as described
449  *	in &iwl_fw_mon_dbgfs_state enum
450  * @mutex: locked while reading from monitor_data debugfs file
451  */
452 #ifdef CONFIG_IWLWIFI_DEBUGFS
453 struct cont_rec {
454 	u32 prev_wr_ptr;
455 	u32 prev_wrap_cnt;
456 	u8  state;
457 	/* Used to sync monitor_data debugfs file with driver unload flow */
458 	struct mutex mutex;
459 };
460 #endif
461 
462 /**
463  * struct iwl_trans_pcie - PCIe transport specific data
464  * @rxq: all the RX queue data
465  * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
466  * @global_table: table mapping received VID from hw to rxb
467  * @rba: allocator for RX replenishing
468  * @ctxt_info: context information for FW self init
469  * @ctxt_info_gen3: context information for gen3 devices
470  * @prph_info: prph info for self init
471  * @prph_scratch: prph scratch for self init
472  * @ctxt_info_dma_addr: dma addr of context information
473  * @prph_info_dma_addr: dma addr of prph info
474  * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of the firmware image loader (IML)
480  * @trans: pointer to the generic transport area
481  * @scd_base_addr: scheduler sram base address in SRAM
482  * @scd_bc_tbls: pointer to the byte count table of the scheduler
483  * @kw: keep warm address
 * @pci_dev: the underlying PCI device
 * @hw_base: mapped device memory, for register access
486  * @ucode_write_complete: indicates that the ucode has been copied.
487  * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
490  * @rx_buf_size: Rx buffer size
491  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
492  * @scd_set_active: should the transport configure the SCD for HCMD queue
493  * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
494  *	frame.
495  * @rx_page_order: page order for receive buffer size
496  * @reg_lock: protect hw register access
497  * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
499 #ifdef CONFIG_IWLWIFI_DEBUGFS
500  * @fw_mon_data: fw continuous recording data
501 #endif
502  * @msix_entries: array of MSI-X entries
503  * @msix_enabled: true if managed to enable MSI-X
504  * @shared_vec_mask: the type of causes the shared vector handles
505  *	(see iwl_shared_irq_flags).
506  * @alloc_vecs: the number of interrupt vectors allocated by the OS
507  * @def_irq: default irq for non rx causes
508  * @fh_init_mask: initial unmasked fh causes
509  * @hw_init_mask: initial unmasked hw causes
510  * @fh_mask: current unmasked fh causes
511  * @hw_mask: current unmasked hw causes
512  * @in_rescan: true if we have triggered a device rescan
513  * @base_rb_stts: base virtual address of receive buffer status for all queues
514  * @base_rb_stts_dma: base physical address of receive buffer status
515  */
516 struct iwl_trans_pcie {
517 	struct iwl_rxq *rxq;
518 	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
519 	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
520 	struct iwl_rb_allocator rba;
521 	union {
522 		struct iwl_context_info *ctxt_info;
523 		struct iwl_context_info_gen3 *ctxt_info_gen3;
524 	};
525 	struct iwl_prph_info *prph_info;
526 	struct iwl_prph_scratch *prph_scratch;
527 	dma_addr_t ctxt_info_dma_addr;
528 	dma_addr_t prph_info_dma_addr;
529 	dma_addr_t prph_scratch_dma_addr;
530 	dma_addr_t iml_dma_addr;
531 	struct iwl_trans *trans;
532 
533 	struct net_device napi_dev;
534 
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;
536 
537 	/* INT ICT Table */
538 	__le32 *ict_tbl;
539 	dma_addr_t ict_tbl_dma;
540 	int ict_index;
541 	bool use_ict;
542 	bool is_down, opmode_down;
543 	bool debug_rfkill;
544 	struct isr_statistics isr_stats;
545 
546 	spinlock_t irq_lock;
547 	struct mutex mutex;
548 	u32 inta_mask;
549 	u32 scd_base_addr;
550 	struct iwl_dma_ptr scd_bc_tbls;
551 	struct iwl_dma_ptr kw;
552 
553 	struct iwl_txq *txq_memory;
554 	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
555 	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
556 	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
557 
558 	/* PCI bus related data */
559 	struct pci_dev *pci_dev;
560 	void __iomem *hw_base;
561 
562 	bool ucode_write_complete;
563 	wait_queue_head_t ucode_write_waitq;
564 	wait_queue_head_t wait_command_queue;
565 	wait_queue_head_t d0i3_waitq;
566 
567 	u8 page_offs, dev_cmd_offs;
568 
569 	u8 cmd_queue;
570 	u8 def_rx_queue;
571 	u8 cmd_fifo;
572 	unsigned int cmd_q_wdg_timeout;
573 	u8 n_no_reclaim_cmds;
574 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
575 	u8 max_tbs;
576 	u16 tfd_size;
577 
578 	enum iwl_amsdu_size rx_buf_size;
579 	bool bc_table_dword;
580 	bool scd_set_active;
581 	bool sw_csum_tx;
582 	bool pcie_dbg_dumped_once;
583 	u32 rx_page_order;
584 
	/* protect hw register access */
586 	spinlock_t reg_lock;
587 	bool cmd_hold_nic_awake;
588 	bool ref_cmd_in_flight;
589 
590 #ifdef CONFIG_IWLWIFI_DEBUGFS
591 	struct cont_rec fw_mon_data;
592 #endif
593 
594 	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
595 	bool msix_enabled;
596 	u8 shared_vec_mask;
597 	u32 alloc_vecs;
598 	u32 def_irq;
599 	u32 fh_init_mask;
600 	u32 hw_init_mask;
601 	u32 fh_mask;
602 	u32 hw_mask;
603 	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
604 	u16 tx_cmd_queue_size;
605 	bool in_rescan;
606 
607 	void *base_rb_stts;
608 	dma_addr_t base_rb_stts_dma;
609 };
610 
611 static inline struct iwl_trans_pcie *
612 IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
613 {
614 	return (void *)trans->trans_specific;
615 }
616 
617 static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
618 				      struct msix_entry *entry)
619 {
620 	/*
621 	 * Before sending the interrupt the HW disables it to prevent
622 	 * a nested interrupt. This is done by writing 1 to the corresponding
623 	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. The register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
627 	 */
628 	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
629 }
630 
631 static inline struct iwl_trans *
632 iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
633 {
634 	return container_of((void *)trans_pcie, struct iwl_trans,
635 			    trans_specific);
636 }
637 
638 /*
639  * Convention: trans API functions: iwl_trans_pcie_XXX
640  *	Other functions: iwl_pcie_XXX
641  */
642 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
643 				       const struct pci_device_id *ent,
644 				       const struct iwl_cfg *cfg);
645 void iwl_trans_pcie_free(struct iwl_trans *trans);
646 
647 /*****************************************************
648 * RX
649 ******************************************************/
650 int _iwl_pcie_rx_init(struct iwl_trans *trans);
651 int iwl_pcie_rx_init(struct iwl_trans *trans);
652 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
653 irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
654 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
655 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
656 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
657 int iwl_pcie_rx_stop(struct iwl_trans *trans);
658 void iwl_pcie_rx_free(struct iwl_trans *trans);
659 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
660 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
661 int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
662 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
663 			    struct iwl_rxq *rxq);
664 int iwl_pcie_rx_alloc(struct iwl_trans *trans);
665 
666 /*****************************************************
667 * ICT - interrupt handling
668 ******************************************************/
669 irqreturn_t iwl_pcie_isr(int irq, void *data);
670 int iwl_pcie_alloc_ict(struct iwl_trans *trans);
671 void iwl_pcie_free_ict(struct iwl_trans *trans);
672 void iwl_pcie_reset_ict(struct iwl_trans *trans);
673 void iwl_pcie_disable_ict(struct iwl_trans *trans);
674 
675 /*****************************************************
676 * TX / HCMD
677 ******************************************************/
678 int iwl_pcie_tx_init(struct iwl_trans *trans);
679 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
680 			  int queue_size);
681 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
682 int iwl_pcie_tx_stop(struct iwl_trans *trans);
683 void iwl_pcie_tx_free(struct iwl_trans *trans);
684 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
685 			       const struct iwl_trans_txq_scd_cfg *cfg,
686 			       unsigned int wdg_timeout);
687 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
688 				bool configure_scd);
689 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
690 					bool shared_mode);
691 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
692 				  struct iwl_txq *txq);
693 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
694 		      struct iwl_device_cmd *dev_cmd, int txq_id);
695 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
696 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
697 void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
698 void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
699 				  struct iwl_txq *txq);
700 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
701 			    struct iwl_rx_cmd_buffer *rxb);
702 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
703 			    struct sk_buff_head *skbs);
704 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
705 void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
706 				   struct iwl_txq *txq, u16 byte_cnt,
707 				   int num_tbs);
708 
709 static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
710 					  u8 idx)
711 {
712 	if (trans->cfg->use_tfh) {
713 		struct iwl_tfh_tfd *tfd = _tfd;
714 		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
715 
716 		return le16_to_cpu(tb->tb_len);
717 	} else {
718 		struct iwl_tfd *tfd = _tfd;
719 		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
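		/*
		 * Legacy TFDs pack the TB's 4 high address bits in the low
		 * nibble of hi_n_len and the 12-bit length in bits 4..15,
		 * hence the shift to extract the length.
		 */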
720 
721 		return le16_to_cpu(tb->hi_n_len) >> 4;
722 	}
723 }
724 
725 /*****************************************************
726 * Error handling
727 ******************************************************/
728 void iwl_pcie_dump_csr(struct iwl_trans *trans);
729 
730 /*****************************************************
731 * Helpers
732 ******************************************************/
733 static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
734 {
735 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
736 
737 	clear_bit(STATUS_INT_ENABLED, &trans->status);
738 	if (!trans_pcie->msix_enabled) {
739 		/* disable interrupts from uCode/NIC to host */
740 		iwl_write32(trans, CSR_INT_MASK, 0x00000000);
741 
742 		/* acknowledge/clear/reset any interrupts still pending
743 		 * from uCode or flow handler (Rx/Tx DMA) */
744 		iwl_write32(trans, CSR_INT, 0xffffffff);
745 		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
746 	} else {
		/* disable all the interrupts we might use */
748 		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
749 			    trans_pcie->fh_init_mask);
750 		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
751 			    trans_pcie->hw_init_mask);
752 	}
753 	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
754 }
755 
756 #define IWL_NUM_OF_COMPLETION_RINGS	31
757 #define IWL_NUM_OF_TRANSFER_RINGS	527
758 
759 static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
760 					    int start)
761 {
762 	int i = 0;
763 
764 	while (start < fw->num_sec &&
765 	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
766 	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
767 		start++;
768 		i++;
769 	}
770 
771 	return i;
772 }
773 
774 static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
775 					       const struct fw_desc *sec,
776 					       struct iwl_dram_data *dram)
777 {
778 	dram->block = dma_alloc_coherent(trans->dev, sec->len,
779 					 &dram->physical,
780 					 GFP_KERNEL);
781 	if (!dram->block)
782 		return -ENOMEM;
783 
784 	dram->size = sec->len;
785 	memcpy(dram->block, sec->data, sec->len);
786 
787 	return 0;
788 }
789 
790 static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
791 {
792 	struct iwl_self_init_dram *dram = &trans->init_dram;
793 	int i;
794 
795 	if (!dram->fw) {
796 		WARN_ON(dram->fw_cnt);
797 		return;
798 	}
799 
800 	for (i = 0; i < dram->fw_cnt; i++)
801 		dma_free_coherent(trans->dev, dram->fw[i].size,
802 				  dram->fw[i].block, dram->fw[i].physical);
803 
804 	kfree(dram->fw);
805 	dram->fw_cnt = 0;
806 	dram->fw = NULL;
807 }
808 
809 static inline void iwl_disable_interrupts(struct iwl_trans *trans)
810 {
811 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
812 
813 	spin_lock(&trans_pcie->irq_lock);
814 	_iwl_disable_interrupts(trans);
815 	spin_unlock(&trans_pcie->irq_lock);
816 }
817 
818 static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
819 {
820 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
821 
822 	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
823 	set_bit(STATUS_INT_ENABLED, &trans->status);
824 	if (!trans_pcie->msix_enabled) {
825 		trans_pcie->inta_mask = CSR_INI_SET_MASK;
826 		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
827 	} else {
828 		/*
829 		 * fh/hw_mask keeps all the unmasked causes.
830 		 * Unlike msi, in msix cause is enabled when it is unset.
831 		 */
832 		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
833 		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
834 		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
835 			    ~trans_pcie->fh_mask);
836 		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
837 			    ~trans_pcie->hw_mask);
838 	}
839 }
840 
841 static inline void iwl_enable_interrupts(struct iwl_trans *trans)
842 {
843 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
844 
845 	spin_lock(&trans_pcie->irq_lock);
846 	_iwl_enable_interrupts(trans);
847 	spin_unlock(&trans_pcie->irq_lock);
848 }

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
850 {
851 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
852 
853 	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
854 	trans_pcie->hw_mask = msk;
855 }
856 
857 static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
858 {
859 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
860 
861 	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
862 	trans_pcie->fh_mask = msk;
863 }
864 
865 static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
866 {
867 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
868 
869 	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
870 	if (!trans_pcie->msix_enabled) {
871 		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
872 		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
873 	} else {
874 		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
875 			    trans_pcie->hw_init_mask);
876 		iwl_enable_fh_int_msk_msix(trans,
877 					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
878 	}
879 }
880 
881 static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
882 {
883 	return index & (q->n_window - 1);
884 }
885 
886 static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
887 				     struct iwl_txq *txq, int idx)
888 {
889 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
890 
891 	if (trans->cfg->use_tfh)
892 		idx = iwl_pcie_get_cmd_index(txq, idx);
893 
894 	return txq->tfds + trans_pcie->tfd_size * idx;
895 }
896 
897 static inline const char *queue_name(struct device *dev,
898 				     struct iwl_trans_pcie *trans_p, int i)
899 {
900 	if (trans_p->shared_vec_mask) {
901 		int vec = trans_p->shared_vec_mask &
902 			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
903 
904 		if (i == 0)
905 			return DRV_NAME ": shared IRQ";
906 
907 		return devm_kasprintf(dev, GFP_KERNEL,
908 				      DRV_NAME ": queue %d", i + vec);
909 	}
910 	if (i == 0)
911 		return DRV_NAME ": default queue";
912 
913 	if (i == trans_p->alloc_vecs - 1)
914 		return DRV_NAME ": exception";
915 
916 	return devm_kasprintf(dev, GFP_KERNEL,
917 			      DRV_NAME  ": queue %d", i);
918 }
919 
920 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
921 {
922 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
923 
924 	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
925 	if (!trans_pcie->msix_enabled) {
926 		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
927 		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
928 	} else {
929 		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
930 			    trans_pcie->fh_init_mask);
931 		iwl_enable_hw_int_msk_msix(trans,
932 					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
933 	}
934 
935 	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
936 		/*
937 		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to allow it
939 		 * to wake up the PCI-E bus for RF-kill interrupts.
940 		 */
941 		iwl_set_bit(trans, CSR_GP_CNTRL,
942 			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
943 	}
944 }
945 
946 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
947 
948 static inline void iwl_wake_queue(struct iwl_trans *trans,
949 				  struct iwl_txq *txq)
950 {
951 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
952 
953 	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
954 		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
955 		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
956 	}
957 }
958 
959 static inline void iwl_stop_queue(struct iwl_trans *trans,
960 				  struct iwl_txq *txq)
961 {
962 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
963 
964 	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
965 		iwl_op_mode_queue_full(trans->op_mode, txq->id);
966 		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
970 }
971 
972 static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
973 {
974 	int index = iwl_pcie_get_cmd_index(q, i);
975 	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
976 	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
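	/*
	 * If the write pointer hasn't wrapped past the read pointer, a used
	 * slot lies in [r, w); otherwise the used region itself wraps, so
	 * check that the index is not in the free region [w, r).
	 */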
977 
978 	return w >= r ?
979 		(index >= r && index < w) :
980 		!(index < r && index >= w);
981 }
982 
983 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
984 {
985 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
986 
987 	lockdep_assert_held(&trans_pcie->mutex);
988 
989 	if (trans_pcie->debug_rfkill)
990 		return true;
991 
992 	return !(iwl_read32(trans, CSR_GP_CNTRL) &
993 		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
994 }
995 
996 static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
997 						  u32 reg, u32 mask, u32 value)
998 {
999 	u32 v;
1000 
1001 #ifdef CONFIG_IWLWIFI_DEBUG
1002 	WARN_ON_ONCE(value & ~mask);
1003 #endif
1004 
1005 	v = iwl_read32(trans, reg);
1006 	v &= ~mask;
1007 	v |= value;
1008 	iwl_write32(trans, reg, v);
1009 }
1010 
1011 static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
1012 					      u32 reg, u32 mask)
1013 {
1014 	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
1015 }
1016 
1017 static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
1018 					    u32 reg, u32 mask)
1019 {
1020 	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
1021 }
1022 
1023 static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
1024 {
1025 	return (trans->dbg_dest_tlv || trans->ini_valid);
1026 }
1027 
1028 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
1029 void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
1030 void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
1031 
1032 #ifdef CONFIG_IWLWIFI_DEBUGFS
1033 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
1034 #else
1035 static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
1036 #endif
1037 
1038 int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
1039 int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
1040 
1041 void iwl_pcie_rx_allocator_work(struct work_struct *data);
1042 
1043 /* common functions that are used by gen2 transport */
1044 int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
1045 void iwl_pcie_apm_config(struct iwl_trans *trans);
1046 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
1047 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
1048 bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
1049 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
1050 				       bool was_in_rfkill);
1051 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
1052 int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
1053 void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
1054 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
1055 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
1056 		      int slots_num, bool cmd_queue);
1057 int iwl_pcie_txq_alloc(struct iwl_trans *trans,
1058 		       struct iwl_txq *txq, int slots_num,  bool cmd_queue);
1059 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
1060 			   struct iwl_dma_ptr *ptr, size_t size);
1061 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
1062 void iwl_pcie_apply_destination(struct iwl_trans *trans);
1063 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
1064 			    struct sk_buff *skb);
1065 #ifdef CONFIG_INET
1066 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
1067 #endif
1068 
1069 /* common functions that are used by gen3 transport */
1070 void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
1071 
1072 /* transport gen 2 exported functions */
1073 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
1074 				 const struct fw_img *fw, bool run_in_rfkill);
1075 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
1076 void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
1077 				   struct iwl_txq *txq);
1078 int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
1079 				     struct iwl_txq **intxq, int size,
1080 				     unsigned int timeout);
1081 int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
1082 				      struct iwl_txq *txq,
1083 				      struct iwl_host_cmd *hcmd);
1084 int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
1085 				 __le16 flags, u8 sta_id, u8 tid,
1086 				 int cmd_id, int size,
1087 				 unsigned int timeout);
1088 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
1089 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
1090 			   struct iwl_device_cmd *dev_cmd, int txq_id);
1091 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
1092 				  struct iwl_host_cmd *cmd);
1093 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
1094 				     bool low_power);
1095 void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
1096 void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
1097 void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
1098 void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
1099 #endif /* __iwl_trans_int_pcie_h__ */
1100