/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * The driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot into which the firmware can place
 * a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator keeps an initial pool of num_queues * (8 - 2) RBDs - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs, and there is no guarantee when the
 *   other 6 RBDs are supplied).
 *   The queues supply the recycled RBDs for the rest of the pool.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
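
/*
 * Worked example of the index scheme above (illustration only, derived from
 * the comment rather than from the hardware spec): with a 256-entry queue,
 * READ = 10 and WRITE = 9 means the queue is empty.  After the firmware
 * fills slot 10 it advances READ to 11, and the driver may process slot 10.
 * Once the driver has refilled slot 10 with a fresh page it advances WRITE
 * to 10, handing the slot back to the firmware.  WRITE never advances past
 * READ - 1, which is how empty and full stay distinguishable.
 */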

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
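
/*
 * Illustration of the mask arithmetic (informational only): with
 * queue_size = 256, read = 5 and write = 250, the subtraction gives
 * 5 - 250 - 1 = -246, and -246 & 255 = 10 - the number of slots the
 * driver may restock before the queue is considered full.
 */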

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
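
/*
 * Informational: the shift above packs a wider DMA address into the 32-bit
 * legacy RBD.  The device presumably recovers the address by shifting back,
 * so receive buffers must be 256-byte aligned; e.g. a buffer at DMA address
 * 0x1_2345_6700 is stored as 0x01234567.
 */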

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
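
/*
 * Note on write_actual (informational): the device is only told the write
 * pointer in multiples of 8, so e.g. write = 13 is reported as
 * write_actual = 8, and the five slots above 8 become visible to the
 * firmware only once write crosses 16.  This batches MMIO writes at the
 * cost of up to 7 slots of slack.
 */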

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
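
/*
 * Layout of the 64-bit free-table entry written above (informational):
 * pages are at least 4 KiB aligned, so the low 12 bits of page_dma are
 * known to be zero and the buffer's pool index ("vid", assigned as i + 1
 * at init time) is stowed there.  A page at DMA address 0xabcd000 with
 * vid 0x023 is published as 0x0abcd023; the vid is presumably echoed back
 * through the used table so the driver can look the buffer up in
 * global_table without scanning the queue.
 */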

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}
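
/*
 * Informational note on the gfp priority: the background allocator passes
 * GFP_KERNEL since it may sleep, while callers on the interrupt path are
 * expected to pass GFP_ATOMIC.  __GFP_NOWARN is ORed in by the allocator
 * when only a few requests are pending, so transient failures stay quiet
 * and only a sustained shortage reaches the IWL_CRIT message above.
 */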

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received allocation request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
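
/*
 * Request/completion handshake (informational): the Rx path bumps
 * req_pending and queues this work; each loop iteration above converts
 * one pending request into RX_CLAIM_REQ_ALLOC allocated RBDs and bumps
 * req_ready.  The consumer side, iwl_pcie_rx_allocator_get() below,
 * decrements req_ready and claims 8 RBDs at a time, so e.g.
 * req_pending = 3 eventually becomes req_ready = 3 with 24 buffers on
 * rbd_allocated.
 */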

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
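
/*
 * Net effect of a successful claim (informational): with
 * RX_CLAIM_REQ_ALLOC = 8, a queue that had used_count = 8 and
 * free_count = 0 ends up with used_count = 0 and free_count = 8, ready
 * for iwl_pcie_rxq_restock() to hand the buffers back to the device.
 */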

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					      free_size * rxq->queue_size,
					      &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/* Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}
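
/*
 * Sizing note (informational): free_size is the per-entry size of the
 * free-RBD table - 8 bytes in multi-queue mode (DMA address plus vid,
 * see iwl_pcie_rxmq_restock()) versus 4 bytes in legacy mode (the >> 8
 * encoded pointer from iwl_pcie_dma_addr2rbd_ptr()).  The used table
 * exists only in multi-queue mode and carries one __le32 per entry.
 */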

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
		return;

	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
		return;

	if (!trans->cfg->integrated)
		return;

	/*
	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
	 * values.
	 * This costs some power, but needed for W/A 9000 integrated A-step
	 * bug where shadow registers are not in the retention list and their
	 * value is lost when NIC powers down
	 */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	iwl_pcie_enable_rx_wake(trans, true);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}
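
/*
 * Init ordering (informational): _iwl_pcie_rx_init() prepares software
 * state (pool ownership, lists, counters) and fills the default queue's
 * rx_free; the hw_init variant then programs the DMA engine; only
 * afterwards does restock publish buffers and the write pointer, so the
 * firmware never sees descriptors before the tables behind them are
 * valid.
 */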

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(trans->dev,
					  free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);
		else
			IWL_DEBUG_INFO(trans,
				       "Free rxq->rb_stts which is NULL\n");

		if (rxq->used_bd)
			dma_free_coherent(trans->dev,
					  sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}
1049e705c121SKalle Valo 
1050e705c121SKalle Valo /*
1051e705c121SKalle Valo  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1052e705c121SKalle Valo  *
1053e705c121SKalle Valo  * Called when a RBD can be reused. The RBD is transferred to the allocator.
1054e705c121SKalle Valo  * When there are 2 empty RBDs - a request for allocation is posted
1055e705c121SKalle Valo  */
1056e705c121SKalle Valo static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1057e705c121SKalle Valo 				  struct iwl_rx_mem_buffer *rxb,
1058e705c121SKalle Valo 				  struct iwl_rxq *rxq, bool emergency)
1059e705c121SKalle Valo {
1060e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1061e705c121SKalle Valo 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1062e705c121SKalle Valo 
1063e705c121SKalle Valo 	/* Move the RBD to the used list; it will be moved to the allocator
1064e705c121SKalle Valo 	 * in batches before claiming or posting a request */
1065e705c121SKalle Valo 	list_add_tail(&rxb->list, &rxq->rx_used);
1066e705c121SKalle Valo 
1067e705c121SKalle Valo 	if (unlikely(emergency))
1068e705c121SKalle Valo 		return;
1069e705c121SKalle Valo 
1070e705c121SKalle Valo 	/* Count the allocator owned RBDs */
1071e705c121SKalle Valo 	rxq->used_count++;
1072e705c121SKalle Valo 
1073e705c121SKalle Valo 	/* If we have RX_POST_REQ_ALLOC newly released rx buffers, issue a
1074e705c121SKalle Valo 	 * request to the allocator. The modulo by RX_CLAIM_REQ_ALLOC covers
1075e705c121SKalle Valo 	 * the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers but
1076e705c121SKalle Valo 	 * still need to post another request (see the example below).
1077e705c121SKalle Valo 	 */
1078e705c121SKalle Valo 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1079e705c121SKalle Valo 		/* Move the 2 RBDs to the allocator's ownership. The allocator
1080e705c121SKalle Valo 		 * has another 6 from the pool for the request completion. */
1081e705c121SKalle Valo 		spin_lock(&rba->lock);
1082e705c121SKalle Valo 		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1083e705c121SKalle Valo 		spin_unlock(&rba->lock);
1084e705c121SKalle Valo 
1085e705c121SKalle Valo 		atomic_inc(&rba->req_pending);
1086e705c121SKalle Valo 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1087e705c121SKalle Valo 	}
1088e705c121SKalle Valo }
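
/*
 * Worked example for the posting condition above, assuming
 * RX_POST_REQ_ALLOC == 2 and RX_CLAIM_REQ_ALLOC == 8 (as the "2 RBDs"
 * and "another 6" comments suggest; the real values are defined
 * elsewhere):
 *	used_count == 2:  2 % 8 == 2  -> post a request
 *	used_count == 10: 10 % 8 == 2 -> post again, covering the case
 *	where the previous batch of RX_CLAIM_REQ_ALLOC buffers was
 *	never claimed back.
 */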
1089e705c121SKalle Valo 
1090e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
109178485054SSara Sharon 				struct iwl_rxq *rxq,
1092e705c121SKalle Valo 				struct iwl_rx_mem_buffer *rxb,
1093e705c121SKalle Valo 				bool emergency)
1094e705c121SKalle Valo {
1095e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1096b2a3b1c1SSara Sharon 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1097e705c121SKalle Valo 	bool page_stolen = false;
1098e705c121SKalle Valo 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1099e705c121SKalle Valo 	u32 offset = 0;
1100e705c121SKalle Valo 
1101e705c121SKalle Valo 	if (WARN_ON(!rxb))
1102e705c121SKalle Valo 		return;
1103e705c121SKalle Valo 
1104e705c121SKalle Valo 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1105e705c121SKalle Valo 
1106e705c121SKalle Valo 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1107e705c121SKalle Valo 		struct iwl_rx_packet *pkt;
1108e705c121SKalle Valo 		u16 sequence;
1109e705c121SKalle Valo 		bool reclaim;
1110e705c121SKalle Valo 		int index, cmd_index, len;
1111e705c121SKalle Valo 		struct iwl_rx_cmd_buffer rxcb = {
1112e705c121SKalle Valo 			._offset = offset,
1113e705c121SKalle Valo 			._rx_page_order = trans_pcie->rx_page_order,
1114e705c121SKalle Valo 			._page = rxb->page,
1115e705c121SKalle Valo 			._page_stolen = false,
1116e705c121SKalle Valo 			.truesize = max_len,
1117e705c121SKalle Valo 		};
1118e705c121SKalle Valo 
1119e705c121SKalle Valo 		pkt = rxb_addr(&rxcb);
1120e705c121SKalle Valo 
11213bfdee76SJohannes Berg 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
11223bfdee76SJohannes Berg 			IWL_DEBUG_RX(trans,
11233bfdee76SJohannes Berg 				     "Q %d: RB end marker at offset %d\n",
11243bfdee76SJohannes Berg 				     rxq->id, offset);
1125e705c121SKalle Valo 			break;
11263bfdee76SJohannes Berg 		}
1127e705c121SKalle Valo 
1128a395058eSJohannes Berg 		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1129a395058eSJohannes Berg 			FH_RSCSR_RXQ_POS != rxq->id,
1130a395058eSJohannes Berg 		     "frame on invalid queue - is on %d and indicates %d\n",
1131a395058eSJohannes Berg 		     rxq->id,
1132a395058eSJohannes Berg 		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1133a395058eSJohannes Berg 			FH_RSCSR_RXQ_POS);
1134ab2e696bSSara Sharon 
1135e705c121SKalle Valo 		IWL_DEBUG_RX(trans,
11363bfdee76SJohannes Berg 			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
11373bfdee76SJohannes Berg 			     rxq->id, offset,
113839bdb17eSSharon Dvir 			     iwl_get_cmd_string(trans,
113939bdb17eSSharon Dvir 						iwl_cmd_id(pkt->hdr.cmd,
114039bdb17eSSharon Dvir 							   pkt->hdr.group_id,
114139bdb17eSSharon Dvir 							   0)),
114235177c99SSara Sharon 			     pkt->hdr.group_id, pkt->hdr.cmd,
114335177c99SSara Sharon 			     le16_to_cpu(pkt->hdr.sequence));
1144e705c121SKalle Valo 
1145e705c121SKalle Valo 		len = iwl_rx_packet_len(pkt);
1146e705c121SKalle Valo 		len += sizeof(u32); /* account for status word */
1147e705c121SKalle Valo 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1148e705c121SKalle Valo 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1149e705c121SKalle Valo 
1150e705c121SKalle Valo 		/* Reclaim a command buffer only if this packet is a response
1151e705c121SKalle Valo 		 *   to a (driver-originated) command.
1152e705c121SKalle Valo 		 * If the packet (e.g. Rx frame) originated from uCode,
1153e705c121SKalle Valo 		 *   there is no command buffer to reclaim.
1154e705c121SKalle Valo 		 * uCode should set the SEQ_RX_FRAME bit if ucode-originated,
1155e705c121SKalle Valo 		 *   but apparently a few don't get it set; catch them here. */
1156e705c121SKalle Valo 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1157d8a130b0SJohannes Berg 		if (reclaim && !pkt->hdr.group_id) {
1158e705c121SKalle Valo 			int i;
1159e705c121SKalle Valo 
1160e705c121SKalle Valo 			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1161e705c121SKalle Valo 				if (trans_pcie->no_reclaim_cmds[i] ==
1162e705c121SKalle Valo 							pkt->hdr.cmd) {
1163e705c121SKalle Valo 					reclaim = false;
1164e705c121SKalle Valo 					break;
1165e705c121SKalle Valo 				}
1166e705c121SKalle Valo 			}
1167e705c121SKalle Valo 		}
1168e705c121SKalle Valo 
1169e705c121SKalle Valo 		sequence = le16_to_cpu(pkt->hdr.sequence);
1170e705c121SKalle Valo 		index = SEQ_TO_INDEX(sequence);
1171bb98ecd4SSara Sharon 		cmd_index = get_cmd_index(txq, index);
1172e705c121SKalle Valo 
1173bce97731SSara Sharon 		if (rxq->id == 0)
1174bce97731SSara Sharon 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1175bce97731SSara Sharon 				       &rxcb);
1176bce97731SSara Sharon 		else
1177bce97731SSara Sharon 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1178bce97731SSara Sharon 					   &rxcb, rxq->id);
1179e705c121SKalle Valo 
1180e705c121SKalle Valo 		if (reclaim) {
1181e705c121SKalle Valo 			kzfree(txq->entries[cmd_index].free_buf);
1182e705c121SKalle Valo 			txq->entries[cmd_index].free_buf = NULL;
1183e705c121SKalle Valo 		}
1184e705c121SKalle Valo 
1185e705c121SKalle Valo 		/*
1186e705c121SKalle Valo 		 * After here, we should always check rxcb._page_stolen,
1187e705c121SKalle Valo 		 * if it is true then one of the handlers took the page.
1188e705c121SKalle Valo 		 */
1189e705c121SKalle Valo 
1190e705c121SKalle Valo 		if (reclaim) {
1191e705c121SKalle Valo 			/* Invoke any callbacks, transfer the buffer to caller,
1192e705c121SKalle Valo 			 * and fire off the (possibly) blocking
1193e705c121SKalle Valo 			 * iwl_trans_send_cmd()
1194e705c121SKalle Valo 			 * as we reclaim the driver command queue */
1195e705c121SKalle Valo 			if (!rxcb._page_stolen)
1196e705c121SKalle Valo 				iwl_pcie_hcmd_complete(trans, &rxcb);
1197e705c121SKalle Valo 			else
1198e705c121SKalle Valo 				IWL_WARN(trans, "Claim null rxb?\n");
1199e705c121SKalle Valo 		}
1200e705c121SKalle Valo 
1201e705c121SKalle Valo 		page_stolen |= rxcb._page_stolen;
1202e705c121SKalle Valo 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1203e705c121SKalle Valo 	}
1204e705c121SKalle Valo 
1205e705c121SKalle Valo 	/* page was stolen from us -- free our reference */
1206e705c121SKalle Valo 	if (page_stolen) {
1207e705c121SKalle Valo 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1208e705c121SKalle Valo 		rxb->page = NULL;
1209e705c121SKalle Valo 	}
1210e705c121SKalle Valo 
1211e705c121SKalle Valo 	/* Reuse the page if possible. For notification packets and
1212e705c121SKalle Valo 	 * SKBs that fail to Rx correctly, add them back into the
1213e705c121SKalle Valo 	 * rx_free list for reuse later. */
1214e705c121SKalle Valo 	if (rxb->page != NULL) {
1215e705c121SKalle Valo 		rxb->page_dma =
1216e705c121SKalle Valo 			dma_map_page(trans->dev, rxb->page, 0,
1217e705c121SKalle Valo 				     PAGE_SIZE << trans_pcie->rx_page_order,
1218e705c121SKalle Valo 				     DMA_FROM_DEVICE);
1219e705c121SKalle Valo 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1220e705c121SKalle Valo 			/*
1221e705c121SKalle Valo 			 * free the page(s) as well to not break
1222e705c121SKalle Valo 			 * the invariant that the items on the used
1223e705c121SKalle Valo 			 * list have no page(s)
1224e705c121SKalle Valo 			 */
1225e705c121SKalle Valo 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1226e705c121SKalle Valo 			rxb->page = NULL;
1227e705c121SKalle Valo 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1228e705c121SKalle Valo 		} else {
1229e705c121SKalle Valo 			list_add_tail(&rxb->list, &rxq->rx_free);
1230e705c121SKalle Valo 			rxq->free_count++;
1231e705c121SKalle Valo 		}
1232e705c121SKalle Valo 	} else
1233e705c121SKalle Valo 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1234e705c121SKalle Valo }
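
/*
 * A minimal sketch of the receive-buffer walk above (illustrative
 * pseudo-code only): several packets are packed into one page, each
 * aligned to FH_RSCSR_FRAME_ALIGN, terminated by an invalid-frame
 * marker or by running out of space:
 *
 *	offset = 0;
 *	while (offset + minimal header still fits in max_len) {
 *		pkt = page + offset;
 *		if (pkt->len_n_flags == FH_RSCSR_FRAME_INVALID)
 *			break;				// end-of-buffer marker
 *		len = iwl_rx_packet_len(pkt) + sizeof(u32);	// status word
 *		... dispatch pkt to the op_mode ...
 *		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 *	}
 */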
1235e705c121SKalle Valo 
1236e705c121SKalle Valo /*
1237e705c121SKalle Valo  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1238e705c121SKalle Valo  */
12392e5d4a8fSHaim Dreyfuss static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1240e705c121SKalle Valo {
1241e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
12422e5d4a8fSHaim Dreyfuss 	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
1243d56daea4SSara Sharon 	u32 r, i, count = 0;
1244e705c121SKalle Valo 	bool emergency = false;
1245e705c121SKalle Valo 
1246e705c121SKalle Valo restart:
1247e705c121SKalle Valo 	spin_lock(&rxq->lock);
1248e705c121SKalle Valo 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1249e705c121SKalle Valo 	 * buffer that the driver may process (last buffer filled by ucode). */
1250e705c121SKalle Valo 	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1251e705c121SKalle Valo 	i = rxq->read;
1252e705c121SKalle Valo 
12535eae443eSSara Sharon 	/* W/A 9000 device step A0 wrap-around bug */
12545eae443eSSara Sharon 	r &= (rxq->queue_size - 1);
12555eae443eSSara Sharon 
1256e705c121SKalle Valo 	/* Rx interrupt, but nothing sent from uCode */
1257e705c121SKalle Valo 	if (i == r)
12585eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1259e705c121SKalle Valo 
1260e705c121SKalle Valo 	while (i != r) {
1261e705c121SKalle Valo 		struct iwl_rx_mem_buffer *rxb;
1262e705c121SKalle Valo 
126396a6497bSSara Sharon 		if (unlikely(rxq->used_count == rxq->queue_size / 2))
1264e705c121SKalle Valo 			emergency = true;
1265e705c121SKalle Valo 
126696a6497bSSara Sharon 		if (trans->cfg->mq_rx_supported) {
126796a6497bSSara Sharon 			/*
126896a6497bSSara Sharon 			 * used_bd entries are 32 bits, but only the low
126996a6497bSSara Sharon 			 * 12 bits are used to retrieve the vid
127096a6497bSSara Sharon 			 */
12715eae443eSSara Sharon 			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
127296a6497bSSara Sharon 
1273e25d65f2SSara Sharon 			if (WARN(!vid ||
1274e25d65f2SSara Sharon 				 vid > ARRAY_SIZE(trans_pcie->global_table),
1275e25d65f2SSara Sharon 				 "Invalid rxb index from HW %u\n", (u32)vid)) {
1276e25d65f2SSara Sharon 				iwl_force_nmi(trans);
12775eae443eSSara Sharon 				goto out;
1278e25d65f2SSara Sharon 			}
1279e25d65f2SSara Sharon 			rxb = trans_pcie->global_table[vid - 1];
1280b1753c62SSara Sharon 			if (WARN(rxb->invalid,
1281b1753c62SSara Sharon 				 "Invalid rxb from HW %u\n", (u32)vid)) {
1282b1753c62SSara Sharon 				iwl_force_nmi(trans);
1283b1753c62SSara Sharon 				goto out;
1284b1753c62SSara Sharon 			}
1285b1753c62SSara Sharon 			rxb->invalid = true;
128696a6497bSSara Sharon 		} else {
1287e705c121SKalle Valo 			rxb = rxq->queue[i];
1288e705c121SKalle Valo 			rxq->queue[i] = NULL;
128996a6497bSSara Sharon 		}
1290e705c121SKalle Valo 
12915eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
129278485054SSara Sharon 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1293e705c121SKalle Valo 
129496a6497bSSara Sharon 		i = (i + 1) & (rxq->queue_size - 1);
1295e705c121SKalle Valo 
1296d56daea4SSara Sharon 		/*
1297d56daea4SSara Sharon 		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers, try to
1298d56daea4SSara Sharon 		 * claim the pre-allocated buffers from the allocator.
1299d56daea4SSara Sharon 		 * If it is not ready, we will try to claim next time.
1300d56daea4SSara Sharon 		 * There is no need to reschedule the work - the allocator
1301d56daea4SSara Sharon 		 * exits only on success.
1302e705c121SKalle Valo 		 */
1303d56daea4SSara Sharon 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1304d56daea4SSara Sharon 			iwl_pcie_rx_allocator_get(trans, rxq);
1305e705c121SKalle Valo 
1306d56daea4SSara Sharon 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1307d56daea4SSara Sharon 			struct iwl_rb_allocator *rba = &trans_pcie->rba;
1308d56daea4SSara Sharon 
1309d56daea4SSara Sharon 			/* Add the remaining empty RBDs for allocator use */
1310d56daea4SSara Sharon 			spin_lock(&rba->lock);
1311d56daea4SSara Sharon 			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1312d56daea4SSara Sharon 			spin_unlock(&rba->lock);
1313d56daea4SSara Sharon 		} else if (emergency) {
1314e705c121SKalle Valo 			count++;
1315e705c121SKalle Valo 			if (count == 8) {
1316e705c121SKalle Valo 				count = 0;
131796a6497bSSara Sharon 				if (rxq->used_count < rxq->queue_size / 3)
1318e705c121SKalle Valo 					emergency = false;
1319e0e168dcSGregory Greenman 
1320e705c121SKalle Valo 				rxq->read = i;
1321e705c121SKalle Valo 				spin_unlock(&rxq->lock);
1322e0e168dcSGregory Greenman 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
132378485054SSara Sharon 				iwl_pcie_rxq_restock(trans, rxq);
1324e705c121SKalle Valo 				goto restart;
1325e705c121SKalle Valo 			}
1326e705c121SKalle Valo 		}
1327e0e168dcSGregory Greenman 	}
13285eae443eSSara Sharon out:
1329e705c121SKalle Valo 	/* Backtrack one entry */
1330e705c121SKalle Valo 	rxq->read = i;
1331e705c121SKalle Valo 	spin_unlock(&rxq->lock);
1332e705c121SKalle Valo 
1333e705c121SKalle Valo 	/*
1334e705c121SKalle Valo 	 * Handle a case where in emergency there are some unallocated RBDs.
1335e705c121SKalle Valo 	 * Those RBDs are in the used list, but are not tracked by the queue's
1336e705c121SKalle Valo 	 * used_count, which only counts allocator-owned RBDs.
1337e705c121SKalle Valo 	 * Unallocated emergency RBDs must be allocated on exit, otherwise
1338e705c121SKalle Valo 	 * when called again the function may not be in emergency mode and
1339e705c121SKalle Valo 	 * they will be handed to the allocator with no tracking in the RBD
1340e705c121SKalle Valo 	 * allocator counters, which will lead to them never being claimed
1341e705c121SKalle Valo 	 * back by the queue.
1342e705c121SKalle Valo 	 * By allocating them here, they are now in the queue free list, and
1343e705c121SKalle Valo 	 * will be restocked by the next call of iwl_pcie_rxq_restock.
1344e705c121SKalle Valo 	 */
1345e705c121SKalle Valo 	if (unlikely(emergency && count))
134678485054SSara Sharon 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1347e705c121SKalle Valo 
1348bce97731SSara Sharon 	if (rxq->napi.poll)
1349bce97731SSara Sharon 		napi_gro_flush(&rxq->napi, false);
1350e0e168dcSGregory Greenman 
1351e0e168dcSGregory Greenman 	iwl_pcie_rxq_restock(trans, rxq);
1352e705c121SKalle Valo }
1353e705c121SKalle Valo 
13542e5d4a8fSHaim Dreyfuss static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
13552e5d4a8fSHaim Dreyfuss {
13562e5d4a8fSHaim Dreyfuss 	u8 queue = entry->entry;
13572e5d4a8fSHaim Dreyfuss 	struct msix_entry *entries = entry - queue;
13582e5d4a8fSHaim Dreyfuss 
13592e5d4a8fSHaim Dreyfuss 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
13602e5d4a8fSHaim Dreyfuss }
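
/*
 * Example of the pointer arithmetic above (a sketch): entry->entry is
 * the queue index, so for queue 2:
 *	entry     == &trans_pcie->msix_entries[2]
 *	entry - 2 == &trans_pcie->msix_entries[0]
 * from which container_of() recovers the enclosing iwl_trans_pcie.
 */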
13612e5d4a8fSHaim Dreyfuss 
13622e5d4a8fSHaim Dreyfuss static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
13632e5d4a8fSHaim Dreyfuss 				      struct msix_entry *entry)
13642e5d4a8fSHaim Dreyfuss {
13652e5d4a8fSHaim Dreyfuss 	/*
13662e5d4a8fSHaim Dreyfuss 	 * Before sending the interrupt the HW disables it to prevent
13672e5d4a8fSHaim Dreyfuss 	 * a nested interrupt. This is done by writing 1 to the corresponding
13682e5d4a8fSHaim Dreyfuss 	 * bit in the mask register. After handling the interrupt, it should be
13692e5d4a8fSHaim Dreyfuss 	 * re-enabled by clearing this bit. This register is defined as a
13702e5d4a8fSHaim Dreyfuss 	 * write-1-clear (W1C) register, meaning that it is cleared by
13712e5d4a8fSHaim Dreyfuss 	 * writing 1 to the bit.
13722e5d4a8fSHaim Dreyfuss 	 */
13737ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
13742e5d4a8fSHaim Dreyfuss }
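
/*
 * W1C example (illustrative): for MSI-X vector 3 the value written
 * above is BIT(3), which clears (re-enables) only bit 3; the zero bits
 * in the written value leave all other vectors untouched.
 */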
13752e5d4a8fSHaim Dreyfuss 
13762e5d4a8fSHaim Dreyfuss /*
13772e5d4a8fSHaim Dreyfuss  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
13782e5d4a8fSHaim Dreyfuss  * This interrupt handler should be used for RSS queues only.
13792e5d4a8fSHaim Dreyfuss  */
13802e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
13812e5d4a8fSHaim Dreyfuss {
13822e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
13832e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
13842e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
13852e5d4a8fSHaim Dreyfuss 
1386c42ff65dSJohannes Berg 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1387c42ff65dSJohannes Berg 
13885eae443eSSara Sharon 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
13895eae443eSSara Sharon 		return IRQ_NONE;
13905eae443eSSara Sharon 
13912e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
13922e5d4a8fSHaim Dreyfuss 
13932e5d4a8fSHaim Dreyfuss 	local_bh_disable();
13942e5d4a8fSHaim Dreyfuss 	iwl_pcie_rx_handle(trans, entry->entry);
13952e5d4a8fSHaim Dreyfuss 	local_bh_enable();
13962e5d4a8fSHaim Dreyfuss 
13972e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
13982e5d4a8fSHaim Dreyfuss 
13992e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
14002e5d4a8fSHaim Dreyfuss 
14012e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
14022e5d4a8fSHaim Dreyfuss }
14032e5d4a8fSHaim Dreyfuss 
1404e705c121SKalle Valo /*
1405e705c121SKalle Valo  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1406e705c121SKalle Valo  */
1407e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1408e705c121SKalle Valo {
1409e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1410e705c121SKalle Valo 	int i;
1411e705c121SKalle Valo 
1412e705c121SKalle Valo 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1413e705c121SKalle Valo 	if (trans->cfg->internal_wimax_coex &&
1414e705c121SKalle Valo 	    !trans->cfg->apmg_not_supported &&
1415e705c121SKalle Valo 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1416e705c121SKalle Valo 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1417e705c121SKalle Valo 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1418e705c121SKalle Valo 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1419e705c121SKalle Valo 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1420e705c121SKalle Valo 		iwl_op_mode_wimax_active(trans->op_mode);
1421e705c121SKalle Valo 		wake_up(&trans_pcie->wait_command_queue);
1422e705c121SKalle Valo 		return;
1423e705c121SKalle Valo 	}
1424e705c121SKalle Valo 
142513a3a390SSara Sharon 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
142613a3a390SSara Sharon 		if (!trans_pcie->txq[i])
142713a3a390SSara Sharon 			continue;
1428b2a3b1c1SSara Sharon 		del_timer(&trans_pcie->txq[i]->stuck_timer);
142913a3a390SSara Sharon 	}
1430e705c121SKalle Valo 
14317d75f32eSEmmanuel Grumbach 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
14327d75f32eSEmmanuel Grumbach 	 * before we wake up the command caller, to ensure a proper cleanup. */
14337d75f32eSEmmanuel Grumbach 	iwl_trans_fw_error(trans);
14347d75f32eSEmmanuel Grumbach 
1435e705c121SKalle Valo 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1436e705c121SKalle Valo 	wake_up(&trans_pcie->wait_command_queue);
1437e705c121SKalle Valo }
1438e705c121SKalle Valo 
1439e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1440e705c121SKalle Valo {
1441e705c121SKalle Valo 	u32 inta;
1442e705c121SKalle Valo 
1443e705c121SKalle Valo 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1444e705c121SKalle Valo 
1445e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1446e705c121SKalle Valo 
1447e705c121SKalle Valo 	/* Discover which interrupts are active/pending */
1448e705c121SKalle Valo 	inta = iwl_read32(trans, CSR_INT);
1449e705c121SKalle Valo 
1450e705c121SKalle Valo 	/* the thread will service interrupts and re-enable them */
1451e705c121SKalle Valo 	return inta;
1452e705c121SKalle Valo }
1453e705c121SKalle Valo 
1454e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1455e705c121SKalle Valo #define ICT_SHIFT	12
1456e705c121SKalle Valo #define ICT_SIZE	(1 << ICT_SHIFT)
1457e705c121SKalle Valo #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
1458e705c121SKalle Valo 
1459e705c121SKalle Valo /* Interrupt cause reader using the ICT table. With this, the driver no
1460e705c121SKalle Valo  * longer reads the INTA register to get interrupts, since reading that
1461e705c121SKalle Valo  * register is expensive. Instead, the device writes interrupt causes
1462e705c121SKalle Valo  * into the ICT table in DRAM, increments its index and fires an
1463e705c121SKalle Valo  * interrupt. The driver ORs all ICT entries from the current index up
1464e705c121SKalle Valo  * to the first entry with a 0 value - that is the interrupt to service -
1465e705c121SKalle Valo  * then sets those entries back to 0 and updates the index.
1466e705c121SKalle Valo  */
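/*
 * Worked example with hypothetical table contents: if ict_index == 0
 * and the table holds { 0x0008, 0x0001, 0x0000, ... }, the reader
 * below computes val = 0x0008 | 0x0001 = 0x0009, zeroes the two
 * consumed slots and leaves ict_index == 2.
 */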
1467e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1468e705c121SKalle Valo {
1469e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1470e705c121SKalle Valo 	u32 inta;
1471e705c121SKalle Valo 	u32 val = 0;
1472e705c121SKalle Valo 	u32 read;
1473e705c121SKalle Valo 
1474e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1475e705c121SKalle Valo 
1476e705c121SKalle Valo 	/* Ignore interrupt if there's nothing in NIC to service.
1477e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1478e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC. */
1479e705c121SKalle Valo 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1480e705c121SKalle Valo 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1481e705c121SKalle Valo 	if (!read)
1482e705c121SKalle Valo 		return 0;
1483e705c121SKalle Valo 
1484e705c121SKalle Valo 	/*
1485e705c121SKalle Valo 	 * Collect all entries up to the first 0, starting from ict_index;
1486e705c121SKalle Valo 	 * note we already read at ict_index.
1487e705c121SKalle Valo 	 */
1488e705c121SKalle Valo 	do {
1489e705c121SKalle Valo 		val |= read;
1490e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1491e705c121SKalle Valo 				trans_pcie->ict_index, read);
1492e705c121SKalle Valo 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1493e705c121SKalle Valo 		trans_pcie->ict_index =
1494e705c121SKalle Valo 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1495e705c121SKalle Valo 
1496e705c121SKalle Valo 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1497e705c121SKalle Valo 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1498e705c121SKalle Valo 					   read);
1499e705c121SKalle Valo 	} while (read);
1500e705c121SKalle Valo 
1501e705c121SKalle Valo 	/* We should not get this value, just ignore it. */
1502e705c121SKalle Valo 	if (val == 0xffffffff)
1503e705c121SKalle Valo 		val = 0;
1504e705c121SKalle Valo 
1505e705c121SKalle Valo 	/*
1506e705c121SKalle Valo 	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
1507e705c121SKalle Valo 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1508e705c121SKalle Valo 	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
1509e705c121SKalle Valo 	 * so we use them to decide on the real state of the Rx bit.
1510e705c121SKalle Valo 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1511e705c121SKalle Valo 	 */
1512e705c121SKalle Valo 	if (val & 0xC0000)
1513e705c121SKalle Valo 		val |= 0x8000;
1514e705c121SKalle Valo 
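	/*
	 * Worked example of the expansion below: val == 0x8001 (bit 15
	 * and bit 0 set) becomes inta == 0x80000001 - the low byte stays
	 * in place and the high byte moves up to bits 24-31.
	 */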
1515e705c121SKalle Valo 	inta = (0xff & val) | ((0xff00 & val) << 16);
1516e705c121SKalle Valo 	return inta;
1517e705c121SKalle Valo }
1518e705c121SKalle Valo 
1519fa4de7f7SJohannes Berg void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
15203a6e168bSJohannes Berg {
15213a6e168bSJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
15223a6e168bSJohannes Berg 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1523326477e4SJohannes Berg 	bool hw_rfkill, prev, report;
15243a6e168bSJohannes Berg 
15253a6e168bSJohannes Berg 	mutex_lock(&trans_pcie->mutex);
1526326477e4SJohannes Berg 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
15273a6e168bSJohannes Berg 	hw_rfkill = iwl_is_rfkill_set(trans);
1528326477e4SJohannes Berg 	if (hw_rfkill) {
1529326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1530326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_HW, &trans->status);
1531326477e4SJohannes Berg 	}
1532326477e4SJohannes Berg 	if (trans_pcie->opmode_down)
1533326477e4SJohannes Berg 		report = hw_rfkill;
1534326477e4SJohannes Berg 	else
1535326477e4SJohannes Berg 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
15363a6e168bSJohannes Berg 
15373a6e168bSJohannes Berg 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
15383a6e168bSJohannes Berg 		 hw_rfkill ? "disable radio" : "enable radio");
15393a6e168bSJohannes Berg 
15403a6e168bSJohannes Berg 	isr_stats->rfkill++;
15413a6e168bSJohannes Berg 
1542326477e4SJohannes Berg 	if (prev != report)
1543326477e4SJohannes Berg 		iwl_trans_pcie_rf_kill(trans, report);
15443a6e168bSJohannes Berg 	mutex_unlock(&trans_pcie->mutex);
15453a6e168bSJohannes Berg 
15463a6e168bSJohannes Berg 	if (hw_rfkill) {
15473a6e168bSJohannes Berg 		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
15483a6e168bSJohannes Berg 				       &trans->status))
15493a6e168bSJohannes Berg 			IWL_DEBUG_RF_KILL(trans,
15503a6e168bSJohannes Berg 					  "Rfkill while SYNC HCMD in flight\n");
15513a6e168bSJohannes Berg 		wake_up(&trans_pcie->wait_command_queue);
15523a6e168bSJohannes Berg 	} else {
1553326477e4SJohannes Berg 		clear_bit(STATUS_RFKILL_HW, &trans->status);
1554326477e4SJohannes Berg 		if (trans_pcie->opmode_down)
1555326477e4SJohannes Berg 			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
15563a6e168bSJohannes Berg 	}
15573a6e168bSJohannes Berg }
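
/*
 * Summary of the reporting rule above (a restatement, not new
 * behavior): while the op_mode is down, report exactly the current HW
 * switch state; otherwise keep reporting "killed" as long as
 * STATUS_RFKILL_OPMODE is still set, so the op_mode is only notified
 * when the sticky state actually toggles (prev != report).
 */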
15583a6e168bSJohannes Berg 
1559e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1560e705c121SKalle Valo {
1561e705c121SKalle Valo 	struct iwl_trans *trans = dev_id;
1562e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1563e705c121SKalle Valo 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1564e705c121SKalle Valo 	u32 inta = 0;
1565e705c121SKalle Valo 	u32 handled = 0;
1566e705c121SKalle Valo 
1567e705c121SKalle Valo 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1568e705c121SKalle Valo 
1569e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1570e705c121SKalle Valo 
1571e705c121SKalle Valo 	/* If the DRAM interrupt table is not set yet,
1572e705c121SKalle Valo 	 * use the legacy INTA register.
1573e705c121SKalle Valo 	 */
1574e705c121SKalle Valo 	if (likely(trans_pcie->use_ict))
1575e705c121SKalle Valo 		inta = iwl_pcie_int_cause_ict(trans);
1576e705c121SKalle Valo 	else
1577e705c121SKalle Valo 		inta = iwl_pcie_int_cause_non_ict(trans);
1578e705c121SKalle Valo 
1579e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1580e705c121SKalle Valo 		IWL_DEBUG_ISR(trans,
1581e705c121SKalle Valo 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1582e705c121SKalle Valo 			      inta, trans_pcie->inta_mask,
1583e705c121SKalle Valo 			      iwl_read32(trans, CSR_INT_MASK),
1584e705c121SKalle Valo 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1585e705c121SKalle Valo 		if (inta & (~trans_pcie->inta_mask))
1586e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1587e705c121SKalle Valo 				      "We got a masked interrupt (0x%08x)\n",
1588e705c121SKalle Valo 				      inta & (~trans_pcie->inta_mask));
1589e705c121SKalle Valo 	}
1590e705c121SKalle Valo 
1591e705c121SKalle Valo 	inta &= trans_pcie->inta_mask;
1592e705c121SKalle Valo 
1593e705c121SKalle Valo 	/*
1594e705c121SKalle Valo 	 * Ignore interrupt if there's nothing in NIC to service.
1595e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1596e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC.
1597e705c121SKalle Valo 	 */
1598e705c121SKalle Valo 	if (unlikely(!inta)) {
1599e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1600e705c121SKalle Valo 		/*
1601e705c121SKalle Valo 		 * Re-enable interrupts here since we don't
1602e705c121SKalle Valo 		 * have anything to service
1603e705c121SKalle Valo 		 */
1604e705c121SKalle Valo 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1605f16c3ebfSEmmanuel Grumbach 			_iwl_enable_interrupts(trans);
1606e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1607e705c121SKalle Valo 		lock_map_release(&trans->sync_cmd_lockdep_map);
1608e705c121SKalle Valo 		return IRQ_NONE;
1609e705c121SKalle Valo 	}
1610e705c121SKalle Valo 
1611e705c121SKalle Valo 	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1612e705c121SKalle Valo 		/*
1613e705c121SKalle Valo 		 * Hardware disappeared. It might have
1614e705c121SKalle Valo 		 * already raised an interrupt.
1615e705c121SKalle Valo 		 */
1616e705c121SKalle Valo 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1617e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1618e705c121SKalle Valo 		goto out;
1619e705c121SKalle Valo 	}
1620e705c121SKalle Valo 
1621e705c121SKalle Valo 	/* Ack/clear/reset pending uCode interrupts.
1622e705c121SKalle Valo 	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1623e705c121SKalle Valo 	 */
1624e705c121SKalle Valo 	/* There is a hardware bug in the interrupt mask function that some
1625e705c121SKalle Valo 	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1626e705c121SKalle Valo 	 * they are disabled in the CSR_INT_MASK register. Furthermore the
1627e705c121SKalle Valo 	 * ICT interrupt handling mechanism has another bug that might cause
1628e705c121SKalle Valo 	 * these unmasked interrupts to fail to be detected. We work around
1629e705c121SKalle Valo 	 * the hardware bugs here by ACKing all the possible interrupts so that
1630e705c121SKalle Valo 	 * interrupt coalescing can still be achieved.
1631e705c121SKalle Valo 	 */
1632e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1633e705c121SKalle Valo 
1634e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR))
1635e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1636e705c121SKalle Valo 			      inta, iwl_read32(trans, CSR_INT_MASK));
1637e705c121SKalle Valo 
1638e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1639e705c121SKalle Valo 
1640e705c121SKalle Valo 	/* Now service all interrupt bits discovered above. */
1641e705c121SKalle Valo 	if (inta & CSR_INT_BIT_HW_ERR) {
1642e705c121SKalle Valo 		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1643e705c121SKalle Valo 
1644e705c121SKalle Valo 		/* Tell the device to stop sending interrupts */
1645e705c121SKalle Valo 		iwl_disable_interrupts(trans);
1646e705c121SKalle Valo 
1647e705c121SKalle Valo 		isr_stats->hw++;
1648e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1649e705c121SKalle Valo 
1650e705c121SKalle Valo 		handled |= CSR_INT_BIT_HW_ERR;
1651e705c121SKalle Valo 
1652e705c121SKalle Valo 		goto out;
1653e705c121SKalle Valo 	}
1654e705c121SKalle Valo 
1655e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1656e705c121SKalle Valo 		/* NIC fires this, but we don't use it, redundant with WAKEUP */
1657e705c121SKalle Valo 		if (inta & CSR_INT_BIT_SCD) {
1658e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1659e705c121SKalle Valo 				      "Scheduler finished transmitting the frame/frames.\n");
1660e705c121SKalle Valo 			isr_stats->sch++;
1661e705c121SKalle Valo 		}
1662e705c121SKalle Valo 
1663e705c121SKalle Valo 		/* Alive notification via Rx interrupt will do the real work */
1664e705c121SKalle Valo 		if (inta & CSR_INT_BIT_ALIVE) {
1665e705c121SKalle Valo 			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1666e705c121SKalle Valo 			isr_stats->alive++;
1667eda50cdeSSara Sharon 			if (trans->cfg->gen2) {
1668eda50cdeSSara Sharon 				/*
1669eda50cdeSSara Sharon 				 * We can restock, since firmware configured
1670eda50cdeSSara Sharon 				 * the RFH
1671eda50cdeSSara Sharon 				 */
1672eda50cdeSSara Sharon 				iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1673eda50cdeSSara Sharon 			}
1674e705c121SKalle Valo 		}
1675e705c121SKalle Valo 	}
1676e705c121SKalle Valo 
1677e705c121SKalle Valo 	/* Safely ignore these bits for debug checks below */
1678e705c121SKalle Valo 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1679e705c121SKalle Valo 
1680e705c121SKalle Valo 	/* HW RF KILL switch toggled */
1681e705c121SKalle Valo 	if (inta & CSR_INT_BIT_RF_KILL) {
16823a6e168bSJohannes Berg 		iwl_pcie_handle_rfkill_irq(trans);
1683e705c121SKalle Valo 		handled |= CSR_INT_BIT_RF_KILL;
1684e705c121SKalle Valo 	}
1685e705c121SKalle Valo 
1686e705c121SKalle Valo 	/* Chip got too hot and stopped itself */
1687e705c121SKalle Valo 	if (inta & CSR_INT_BIT_CT_KILL) {
1688e705c121SKalle Valo 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1689e705c121SKalle Valo 		isr_stats->ctkill++;
1690e705c121SKalle Valo 		handled |= CSR_INT_BIT_CT_KILL;
1691e705c121SKalle Valo 	}
1692e705c121SKalle Valo 
1693e705c121SKalle Valo 	/* Error detected by uCode */
1694e705c121SKalle Valo 	if (inta & CSR_INT_BIT_SW_ERR) {
1695e705c121SKalle Valo 		IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
1696e705c121SKalle Valo 			inta);
1697e705c121SKalle Valo 		isr_stats->sw++;
1698e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1699e705c121SKalle Valo 		handled |= CSR_INT_BIT_SW_ERR;
1700e705c121SKalle Valo 	}
1701e705c121SKalle Valo 
1702e705c121SKalle Valo 	/* uCode wakes up after power-down sleep */
1703e705c121SKalle Valo 	if (inta & CSR_INT_BIT_WAKEUP) {
1704e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1705e705c121SKalle Valo 		iwl_pcie_rxq_check_wrptr(trans);
1706e705c121SKalle Valo 		iwl_pcie_txq_check_wrptrs(trans);
1707e705c121SKalle Valo 
1708e705c121SKalle Valo 		isr_stats->wakeup++;
1709e705c121SKalle Valo 
1710e705c121SKalle Valo 		handled |= CSR_INT_BIT_WAKEUP;
1711e705c121SKalle Valo 	}
1712e705c121SKalle Valo 
1713e705c121SKalle Valo 	/* All uCode command responses, including Tx command responses,
1714e705c121SKalle Valo 	 * Rx "responses" (frame-received notification), and other
1715e705c121SKalle Valo 	 * notifications from uCode come through here */
1716e705c121SKalle Valo 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1717e705c121SKalle Valo 		    CSR_INT_BIT_RX_PERIODIC)) {
1718e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1719e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1720e705c121SKalle Valo 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1721e705c121SKalle Valo 			iwl_write32(trans, CSR_FH_INT_STATUS,
1722e705c121SKalle Valo 					CSR_FH_INT_RX_MASK);
1723e705c121SKalle Valo 		}
1724e705c121SKalle Valo 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1725e705c121SKalle Valo 			handled |= CSR_INT_BIT_RX_PERIODIC;
1726e705c121SKalle Valo 			iwl_write32(trans,
1727e705c121SKalle Valo 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1728e705c121SKalle Valo 		}
1729e705c121SKalle Valo 		/* Sending an RX interrupt requires many steps to be done
1730e705c121SKalle Valo 		 * in the device:
1731e705c121SKalle Valo 		 * 1- write interrupt to current index in ICT table.
1732e705c121SKalle Valo 		 * 2- dma RX frame.
1733e705c121SKalle Valo 		 * 3- update RX shared data to indicate last write index.
1734e705c121SKalle Valo 		 * 4- send interrupt.
1735e705c121SKalle Valo 		 * This could lead to an RX race: the driver could receive an
1736e705c121SKalle Valo 		 * RX interrupt before the shared data reflects it; the
1737e705c121SKalle Valo 		 * periodic interrupt will detect any dangling Rx activity.
1738e705c121SKalle Valo 		 */
1739e705c121SKalle Valo 
1740e705c121SKalle Valo 		/* Disable periodic interrupt; we use it as just a one-shot. */
1741e705c121SKalle Valo 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
1742e705c121SKalle Valo 			    CSR_INT_PERIODIC_DIS);
1743e705c121SKalle Valo 
1744e705c121SKalle Valo 		/*
1745e705c121SKalle Valo 		 * Enable periodic interrupt in 8 msec only if we received
1746e705c121SKalle Valo 		 * real RX interrupt (instead of just periodic int), to catch
1747e705c121SKalle Valo 		 * any dangling Rx interrupt.  If it was just the periodic
1748e705c121SKalle Valo 		 * interrupt, there was no dangling Rx activity, and no need
1749e705c121SKalle Valo 		 * to extend the periodic interrupt; one-shot is enough.
1750e705c121SKalle Valo 		 */
1751e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1752e705c121SKalle Valo 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
1753e705c121SKalle Valo 				   CSR_INT_PERIODIC_ENA);
1754e705c121SKalle Valo 
1755e705c121SKalle Valo 		isr_stats->rx++;
1756e705c121SKalle Valo 
1757e705c121SKalle Valo 		local_bh_disable();
17582e5d4a8fSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 0);
1759e705c121SKalle Valo 		local_bh_enable();
1760e705c121SKalle Valo 	}
1761e705c121SKalle Valo 
1762e705c121SKalle Valo 	/* This "Tx" DMA channel is used only for loading uCode */
1763e705c121SKalle Valo 	if (inta & CSR_INT_BIT_FH_TX) {
1764e705c121SKalle Valo 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1765e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1766e705c121SKalle Valo 		isr_stats->tx++;
1767e705c121SKalle Valo 		handled |= CSR_INT_BIT_FH_TX;
1768e705c121SKalle Valo 		/* Wake up uCode load routine, now that load is complete */
1769e705c121SKalle Valo 		trans_pcie->ucode_write_complete = true;
1770e705c121SKalle Valo 		wake_up(&trans_pcie->ucode_write_waitq);
1771e705c121SKalle Valo 	}
1772e705c121SKalle Valo 
1773e705c121SKalle Valo 	if (inta & ~handled) {
1774e705c121SKalle Valo 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1775e705c121SKalle Valo 		isr_stats->unhandled++;
1776e705c121SKalle Valo 	}
1777e705c121SKalle Valo 
1778e705c121SKalle Valo 	if (inta & ~(trans_pcie->inta_mask)) {
1779e705c121SKalle Valo 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1780e705c121SKalle Valo 			 inta & ~trans_pcie->inta_mask);
1781e705c121SKalle Valo 	}
1782e705c121SKalle Valo 
1783f16c3ebfSEmmanuel Grumbach 	spin_lock(&trans_pcie->irq_lock);
1784a6bd005fSEmmanuel Grumbach 	/* Re-enable all interrupts only if they were disabled by the irq */
1785f16c3ebfSEmmanuel Grumbach 	if (test_bit(STATUS_INT_ENABLED, &trans->status))
1786f16c3ebfSEmmanuel Grumbach 		_iwl_enable_interrupts(trans);
1787f16c3ebfSEmmanuel Grumbach 	/* we are loading the firmware, enable FH_TX interrupt only */
1788f16c3ebfSEmmanuel Grumbach 	else if (handled & CSR_INT_BIT_FH_TX)
1789f16c3ebfSEmmanuel Grumbach 		iwl_enable_fw_load_int(trans);
1790e705c121SKalle Valo 	/* Re-enable RF_KILL if it occurred */
1791e705c121SKalle Valo 	else if (handled & CSR_INT_BIT_RF_KILL)
1792e705c121SKalle Valo 		iwl_enable_rfkill_int(trans);
1793f16c3ebfSEmmanuel Grumbach 	spin_unlock(&trans_pcie->irq_lock);
1794e705c121SKalle Valo 
1795e705c121SKalle Valo out:
1796e705c121SKalle Valo 	lock_map_release(&trans->sync_cmd_lockdep_map);
1797e705c121SKalle Valo 	return IRQ_HANDLED;
1798e705c121SKalle Valo }
1799e705c121SKalle Valo 
1800e705c121SKalle Valo /******************************************************************************
1801e705c121SKalle Valo  *
1802e705c121SKalle Valo  * ICT functions
1803e705c121SKalle Valo  *
1804e705c121SKalle Valo  ******************************************************************************/
1805e705c121SKalle Valo 
1806e705c121SKalle Valo /* Free dram table */
1807e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans)
1808e705c121SKalle Valo {
1809e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1810e705c121SKalle Valo 
1811e705c121SKalle Valo 	if (trans_pcie->ict_tbl) {
1812e705c121SKalle Valo 		dma_free_coherent(trans->dev, ICT_SIZE,
1813e705c121SKalle Valo 				  trans_pcie->ict_tbl,
1814e705c121SKalle Valo 				  trans_pcie->ict_tbl_dma);
1815e705c121SKalle Valo 		trans_pcie->ict_tbl = NULL;
1816e705c121SKalle Valo 		trans_pcie->ict_tbl_dma = 0;
1817e705c121SKalle Valo 	}
1818e705c121SKalle Valo }
1819e705c121SKalle Valo 
1820e705c121SKalle Valo /*
1821e705c121SKalle Valo  * Allocate the DRAM shared table; it is an aligned memory
1822e705c121SKalle Valo  * block of ICT_SIZE.
1823e705c121SKalle Valo  * Also reset all data related to ICT table interrupts.
1824e705c121SKalle Valo  */
1825e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1826e705c121SKalle Valo {
1827e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1828e705c121SKalle Valo 
1829e705c121SKalle Valo 	trans_pcie->ict_tbl =
1830e705c121SKalle Valo 		dma_zalloc_coherent(trans->dev, ICT_SIZE,
1831e705c121SKalle Valo 				   &trans_pcie->ict_tbl_dma,
1832e705c121SKalle Valo 				   GFP_KERNEL);
1833e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1834e705c121SKalle Valo 		return -ENOMEM;
1835e705c121SKalle Valo 
1836e705c121SKalle Valo 	/* just an API sanity check ... it is guaranteed to be aligned */
1837e705c121SKalle Valo 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1838e705c121SKalle Valo 		iwl_pcie_free_ict(trans);
1839e705c121SKalle Valo 		return -EINVAL;
1840e705c121SKalle Valo 	}
1841e705c121SKalle Valo 
1842e705c121SKalle Valo 	return 0;
1843e705c121SKalle Valo }
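
/*
 * Example for the alignment check above: ICT_SIZE is 4096, so
 * (ict_tbl_dma & (ICT_SIZE - 1)) tests 4 KiB alignment - a DMA address
 * of 0x12345000 (hypothetical) passes, while 0x12345100 would trip the
 * WARN_ON.
 */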
1844e705c121SKalle Valo 
1845e705c121SKalle Valo /* The device is going up: inform it that we are using the ICT interrupt
1846e705c121SKalle Valo  * table, and tell the driver to start using ICT interrupts.
1847e705c121SKalle Valo  */
1848e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans)
1849e705c121SKalle Valo {
1850e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1851e705c121SKalle Valo 	u32 val;
1852e705c121SKalle Valo 
1853e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1854e705c121SKalle Valo 		return;
1855e705c121SKalle Valo 
1856e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1857f16c3ebfSEmmanuel Grumbach 	_iwl_disable_interrupts(trans);
1858e705c121SKalle Valo 
1859e705c121SKalle Valo 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1860e705c121SKalle Valo 
1861e705c121SKalle Valo 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
1862e705c121SKalle Valo 
1863e705c121SKalle Valo 	val |= CSR_DRAM_INT_TBL_ENABLE |
1864e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
1865e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
1866e705c121SKalle Valo 
1867e705c121SKalle Valo 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
1868e705c121SKalle Valo 
1869e705c121SKalle Valo 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1870e705c121SKalle Valo 	trans_pcie->use_ict = true;
1871e705c121SKalle Valo 	trans_pcie->ict_index = 0;
1872e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1873f16c3ebfSEmmanuel Grumbach 	_iwl_enable_interrupts(trans);
1874e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1875e705c121SKalle Valo }
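
/*
 * Example of the register value built above (hypothetical address):
 * ict_tbl_dma == 0x0123f000 yields val == 0x0123f ORed with the enable
 * flags, i.e. the 4 KiB-aligned table address is programmed as a page
 * number (physical address >> ICT_SHIFT).
 */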
1876e705c121SKalle Valo 
1877e705c121SKalle Valo /* The device is going down: disable ICT interrupt usage */
1878e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans)
1879e705c121SKalle Valo {
1880e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1881e705c121SKalle Valo 
1882e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1883e705c121SKalle Valo 	trans_pcie->use_ict = false;
1884e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1885e705c121SKalle Valo }
1886e705c121SKalle Valo 
1887e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data)
1888e705c121SKalle Valo {
1889e705c121SKalle Valo 	struct iwl_trans *trans = data;
1890e705c121SKalle Valo 
1891e705c121SKalle Valo 	if (!trans)
1892e705c121SKalle Valo 		return IRQ_NONE;
1893e705c121SKalle Valo 
1894e705c121SKalle Valo 	/* Disable (but don't clear!) interrupts here to avoid
1895e705c121SKalle Valo 	 * back-to-back ISRs and sporadic interrupts from our NIC.
1896e705c121SKalle Valo 	 * If we have something to service, the tasklet will re-enable ints.
1897e705c121SKalle Valo 	 * If we *don't* have something, we'll re-enable before leaving here.
1898e705c121SKalle Valo 	 */
1899e705c121SKalle Valo 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1900e705c121SKalle Valo 
1901e705c121SKalle Valo 	return IRQ_WAKE_THREAD;
1902e705c121SKalle Valo }
19032e5d4a8fSHaim Dreyfuss 
19042e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
19052e5d4a8fSHaim Dreyfuss {
19062e5d4a8fSHaim Dreyfuss 	return IRQ_WAKE_THREAD;
19072e5d4a8fSHaim Dreyfuss }
19082e5d4a8fSHaim Dreyfuss 
19092e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
19102e5d4a8fSHaim Dreyfuss {
19112e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
19122e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
19132e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
191446167a8fSColin Ian King 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
19152e5d4a8fSHaim Dreyfuss 	u32 inta_fh, inta_hw;
19162e5d4a8fSHaim Dreyfuss 
19172e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
19182e5d4a8fSHaim Dreyfuss 
19192e5d4a8fSHaim Dreyfuss 	spin_lock(&trans_pcie->irq_lock);
19207ef3dd26SHaim Dreyfuss 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
19217ef3dd26SHaim Dreyfuss 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
19222e5d4a8fSHaim Dreyfuss 	/*
19232e5d4a8fSHaim Dreyfuss 	 * Clear the causes registers to avoid handling the same cause again.
19242e5d4a8fSHaim Dreyfuss 	 */
19257ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
19267ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
19272e5d4a8fSHaim Dreyfuss 	spin_unlock(&trans_pcie->irq_lock);
19282e5d4a8fSHaim Dreyfuss 
1929c42ff65dSJohannes Berg 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
1930c42ff65dSJohannes Berg 
19312e5d4a8fSHaim Dreyfuss 	if (unlikely(!(inta_fh | inta_hw))) {
19322e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
19332e5d4a8fSHaim Dreyfuss 		lock_map_release(&trans->sync_cmd_lockdep_map);
19342e5d4a8fSHaim Dreyfuss 		return IRQ_NONE;
19352e5d4a8fSHaim Dreyfuss 	}
19362e5d4a8fSHaim Dreyfuss 
19372e5d4a8fSHaim Dreyfuss 	if (iwl_have_debug_level(IWL_DL_ISR))
19382e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
19392e5d4a8fSHaim Dreyfuss 			      inta_fh,
19402e5d4a8fSHaim Dreyfuss 			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
19412e5d4a8fSHaim Dreyfuss 
1942496d83caSHaim Dreyfuss 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
1943496d83caSHaim Dreyfuss 	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
1944496d83caSHaim Dreyfuss 		local_bh_disable();
1945496d83caSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 0);
1946496d83caSHaim Dreyfuss 		local_bh_enable();
1947496d83caSHaim Dreyfuss 	}
1948496d83caSHaim Dreyfuss 
1949496d83caSHaim Dreyfuss 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
1950496d83caSHaim Dreyfuss 	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
1951496d83caSHaim Dreyfuss 		local_bh_disable();
1952496d83caSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 1);
1953496d83caSHaim Dreyfuss 		local_bh_enable();
1954496d83caSHaim Dreyfuss 	}
1955496d83caSHaim Dreyfuss 
19562e5d4a8fSHaim Dreyfuss 	/* This "Tx" DMA channel is used only for loading uCode */
19572e5d4a8fSHaim Dreyfuss 	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
19582e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
19592e5d4a8fSHaim Dreyfuss 		isr_stats->tx++;
19602e5d4a8fSHaim Dreyfuss 		/*
19612e5d4a8fSHaim Dreyfuss 		 * Wake up uCode load routine,
19622e5d4a8fSHaim Dreyfuss 		 * now that load is complete
19632e5d4a8fSHaim Dreyfuss 		 */
19642e5d4a8fSHaim Dreyfuss 		trans_pcie->ucode_write_complete = true;
19652e5d4a8fSHaim Dreyfuss 		wake_up(&trans_pcie->ucode_write_waitq);
19662e5d4a8fSHaim Dreyfuss 	}
19672e5d4a8fSHaim Dreyfuss 
19682e5d4a8fSHaim Dreyfuss 	/* Error detected by uCode */
19692e5d4a8fSHaim Dreyfuss 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
19702e5d4a8fSHaim Dreyfuss 	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
19712e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans,
19722e5d4a8fSHaim Dreyfuss 			"Microcode SW error detected. Restarting 0x%X.\n",
19732e5d4a8fSHaim Dreyfuss 			inta_fh);
19742e5d4a8fSHaim Dreyfuss 		isr_stats->sw++;
19752e5d4a8fSHaim Dreyfuss 		iwl_pcie_irq_handle_error(trans);
19762e5d4a8fSHaim Dreyfuss 	}
19772e5d4a8fSHaim Dreyfuss 
19782e5d4a8fSHaim Dreyfuss 	/* After checking FH register check HW register */
19792e5d4a8fSHaim Dreyfuss 	if (iwl_have_debug_level(IWL_DL_ISR))
19802e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans,
19812e5d4a8fSHaim Dreyfuss 			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
19822e5d4a8fSHaim Dreyfuss 			      inta_hw,
19832e5d4a8fSHaim Dreyfuss 			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
19842e5d4a8fSHaim Dreyfuss 
19852e5d4a8fSHaim Dreyfuss 	/* Alive notification via Rx interrupt will do the real work */
19862e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
19872e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
19882e5d4a8fSHaim Dreyfuss 		isr_stats->alive++;
1989eda50cdeSSara Sharon 		if (trans->cfg->gen2) {
1990eda50cdeSSara Sharon 			/* We can restock, since firmware configured the RFH */
1991eda50cdeSSara Sharon 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1992eda50cdeSSara Sharon 		}
19932e5d4a8fSHaim Dreyfuss 	}
19942e5d4a8fSHaim Dreyfuss 
19952e5d4a8fSHaim Dreyfuss 	/* uCode wakes up after power-down sleep */
19962e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
19972e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
19982e5d4a8fSHaim Dreyfuss 		iwl_pcie_rxq_check_wrptr(trans);
19992e5d4a8fSHaim Dreyfuss 		iwl_pcie_txq_check_wrptrs(trans);
20002e5d4a8fSHaim Dreyfuss 
20012e5d4a8fSHaim Dreyfuss 		isr_stats->wakeup++;
20022e5d4a8fSHaim Dreyfuss 	}
20032e5d4a8fSHaim Dreyfuss 
20042e5d4a8fSHaim Dreyfuss 	/* Chip got too hot and stopped itself */
20052e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
20062e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
20072e5d4a8fSHaim Dreyfuss 		isr_stats->ctkill++;
20082e5d4a8fSHaim Dreyfuss 	}
20092e5d4a8fSHaim Dreyfuss 
20102e5d4a8fSHaim Dreyfuss 	/* HW RF KILL switch toggled */
20113a6e168bSJohannes Berg 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
20123a6e168bSJohannes Berg 		iwl_pcie_handle_rfkill_irq(trans);
20132e5d4a8fSHaim Dreyfuss 
20142e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
20152e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans,
20162e5d4a8fSHaim Dreyfuss 			"Hardware error detected. Restarting.\n");
20172e5d4a8fSHaim Dreyfuss 
20182e5d4a8fSHaim Dreyfuss 		isr_stats->hw++;
20192e5d4a8fSHaim Dreyfuss 		iwl_pcie_irq_handle_error(trans);
20202e5d4a8fSHaim Dreyfuss 	}
20212e5d4a8fSHaim Dreyfuss 
20222e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
20232e5d4a8fSHaim Dreyfuss 
20242e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
20252e5d4a8fSHaim Dreyfuss 
20262e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
20272e5d4a8fSHaim Dreyfuss }
2028