/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
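 * (Illustrative example: with READ = 10, the queue is empty when WRITE = 9
 * and completely full when WRITE = 10.)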
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When 6 more RBDs are used - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs; there is no guarantee when the
 *   other 6 RBDs are supplied).
 *   The queues supply the recycling of the remaining RBDs.
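 *   For example (illustrative numbers): with four RX queues, the initial
 *   pool holds 4 * (8 - 2) = 24 RBDs.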
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
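	 * E.g. (illustrative values): with queue_size = 256, read = 3 and
	 * write = 250, (3 - 250 - 1) & 255 = 8 free slots remain.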
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
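 *
 * The device addresses receive buffers in 256-byte units, hence the shift
 * by 8; e.g. (illustrative) a DMA address of 0x12345600 becomes the RBD
 * pointer 0x00123456.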
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

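	/* The HW consumes the write pointer in multiples of 8; e.g. a write
	 * index of 13 is published as write_actual = 8.
	 */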
	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
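		/* The low 12 bits carry the buffer's VID; the HW echoes it
		 * back in the used BD so the driver can find this rxb again.
		 */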
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates for each received request 8 pages
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
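	/* E.g. (illustrative): req_ready == 2 -> the call returns 1 and
	 * req_ready becomes 1; req_ready == 0 -> the call returns -1 and
	 * req_ready is left untouched.
	 */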
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					     free_size * rxq->queue_size,
					     &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/* Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4k, 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
		return;

	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
		return;

	if (!trans->cfg->integrated)
		return;

	/*
	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
	 * values.
	 * This costs some power, but needed for W/A 9000 integrated A-step
	 * bug where shadow registers are not in the retention list and their
	 * value is lost when NIC powers down
	 */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4k, 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	iwl_pcie_enable_rx_wake(trans, true);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
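		/* vids start at 1 so that a VID of 0 coming back from the HW
		 * can never match a valid buffer */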
961e25d65f2SSara Sharon 		rxb->vid = (u16)(i + 1);
962b1753c62SSara Sharon 		rxb->invalid = true;
96396a6497bSSara Sharon 	}
96478485054SSara Sharon 
96578485054SSara Sharon 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
9662047fa54SSara Sharon 
967eda50cdeSSara Sharon 	return 0;
968eda50cdeSSara Sharon }
969eda50cdeSSara Sharon 
970eda50cdeSSara Sharon int iwl_pcie_rx_init(struct iwl_trans *trans)
971eda50cdeSSara Sharon {
972eda50cdeSSara Sharon 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
973eda50cdeSSara Sharon 	int ret = _iwl_pcie_rx_init(trans);
974eda50cdeSSara Sharon 
975eda50cdeSSara Sharon 	if (ret)
976eda50cdeSSara Sharon 		return ret;
977eda50cdeSSara Sharon 
9782047fa54SSara Sharon 	if (trans->cfg->mq_rx_supported)
979bce97731SSara Sharon 		iwl_pcie_rx_mq_hw_init(trans);
9802047fa54SSara Sharon 	else
981eda50cdeSSara Sharon 		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
9822047fa54SSara Sharon 
983eda50cdeSSara Sharon 	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
98478485054SSara Sharon 
985eda50cdeSSara Sharon 	spin_lock(&trans_pcie->rxq->lock);
986eda50cdeSSara Sharon 	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
987eda50cdeSSara Sharon 	spin_unlock(&trans_pcie->rxq->lock);
988e705c121SKalle Valo 
989e705c121SKalle Valo 	return 0;
990e705c121SKalle Valo }
991e705c121SKalle Valo 
992eda50cdeSSara Sharon int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
993eda50cdeSSara Sharon {
994eda50cdeSSara Sharon 	/*
995eda50cdeSSara Sharon 	 * We don't configure the RFH.
996eda50cdeSSara Sharon 	 * Restock will be done at alive, after firmware configured the RFH.
997eda50cdeSSara Sharon 	 */
998eda50cdeSSara Sharon 	return _iwl_pcie_rx_init(trans);
999eda50cdeSSara Sharon }
1000eda50cdeSSara Sharon 
1001e705c121SKalle Valo void iwl_pcie_rx_free(struct iwl_trans *trans)
1002e705c121SKalle Valo {
1003e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1004e705c121SKalle Valo 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
100596a6497bSSara Sharon 	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
100696a6497bSSara Sharon 					      sizeof(__le32);
100778485054SSara Sharon 	int i;
1008e705c121SKalle Valo 
100978485054SSara Sharon 	/*
101078485054SSara Sharon 	 * if rxq is NULL, it means that nothing has been allocated,
101178485054SSara Sharon 	 * exit now
101278485054SSara Sharon 	 */
101378485054SSara Sharon 	if (!trans_pcie->rxq) {
1014e705c121SKalle Valo 		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1015e705c121SKalle Valo 		return;
1016e705c121SKalle Valo 	}
1017e705c121SKalle Valo 
1018e705c121SKalle Valo 	cancel_work_sync(&rba->rx_alloc);
1019e705c121SKalle Valo 
102078485054SSara Sharon 	iwl_pcie_free_rbs_pool(trans);
1021e705c121SKalle Valo 
102278485054SSara Sharon 	for (i = 0; i < trans->num_rx_queues; i++) {
102378485054SSara Sharon 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
102478485054SSara Sharon 
102578485054SSara Sharon 		if (rxq->bd)
102678485054SSara Sharon 			dma_free_coherent(trans->dev,
102796a6497bSSara Sharon 					  free_size * rxq->queue_size,
1028e705c121SKalle Valo 					  rxq->bd, rxq->bd_dma);
1029e705c121SKalle Valo 		rxq->bd_dma = 0;
1030e705c121SKalle Valo 		rxq->bd = NULL;
1031e705c121SKalle Valo 
1032e705c121SKalle Valo 		if (rxq->rb_stts)
1033e705c121SKalle Valo 			dma_free_coherent(trans->dev,
1034e705c121SKalle Valo 					  sizeof(struct iwl_rb_status),
1035e705c121SKalle Valo 					  rxq->rb_stts, rxq->rb_stts_dma);
1036e705c121SKalle Valo 		else
103778485054SSara Sharon 			IWL_DEBUG_INFO(trans,
103878485054SSara Sharon 				       "Free rxq->rb_stts which is NULL\n");
103978485054SSara Sharon 
104096a6497bSSara Sharon 		if (rxq->used_bd)
104196a6497bSSara Sharon 			dma_free_coherent(trans->dev,
104296a6497bSSara Sharon 					  sizeof(__le32) * rxq->queue_size,
104396a6497bSSara Sharon 					  rxq->used_bd, rxq->used_bd_dma);
104496a6497bSSara Sharon 		rxq->used_bd_dma = 0;
104596a6497bSSara Sharon 		rxq->used_bd = NULL;
1046bce97731SSara Sharon 
1047bce97731SSara Sharon 		if (rxq->napi.poll)
1048bce97731SSara Sharon 			netif_napi_del(&rxq->napi);
104996a6497bSSara Sharon 	}
105078485054SSara Sharon 	kfree(trans_pcie->rxq);
1051e705c121SKalle Valo }
1052e705c121SKalle Valo 
1053e705c121SKalle Valo /*
1054e705c121SKalle Valo  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1055e705c121SKalle Valo  *
1056e705c121SKalle Valo  * Called when an RBD can be reused. The RBD is transferred to the allocator.
1057e705c121SKalle Valo  * When there are 2 empty RBDs, a request for allocation is posted.
1058e705c121SKalle Valo  */
1059e705c121SKalle Valo static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1060e705c121SKalle Valo 				  struct iwl_rx_mem_buffer *rxb,
1061e705c121SKalle Valo 				  struct iwl_rxq *rxq, bool emergency)
1062e705c121SKalle Valo {
1063e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1064e705c121SKalle Valo 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1065e705c121SKalle Valo 
1066e705c121SKalle Valo 	/* Move the RBD to the used list; it will be moved to the allocator
1067e705c121SKalle Valo 	 * in batches before claiming or posting a request */
1068e705c121SKalle Valo 	list_add_tail(&rxb->list, &rxq->rx_used);
1069e705c121SKalle Valo 
1070e705c121SKalle Valo 	if (unlikely(emergency))
1071e705c121SKalle Valo 		return;
1072e705c121SKalle Valo 
1073e705c121SKalle Valo 	/* Count the allocator owned RBDs */
1074e705c121SKalle Valo 	rxq->used_count++;
1075e705c121SKalle Valo 
1076e705c121SKalle Valo 	/* If we have RX_POST_REQ_ALLOC newly released rx buffers,
1077e705c121SKalle Valo 	 * issue a request to the allocator. The modulo by RX_CLAIM_REQ_ALLOC
1078e705c121SKalle Valo 	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
1079e705c121SKalle Valo 	 * buffers but still need to post another request.
1080e705c121SKalle Valo 	 */
1081e705c121SKalle Valo 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1082e705c121SKalle Valo 		/* Move the 2 RBDs to the allocator's ownership.
1083e705c121SKalle Valo 		 * The allocator has another 6 from its pool for the request completion */
1084e705c121SKalle Valo 		spin_lock(&rba->lock);
1085e705c121SKalle Valo 		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1086e705c121SKalle Valo 		spin_unlock(&rba->lock);
1087e705c121SKalle Valo 
1088e705c121SKalle Valo 		atomic_inc(&rba->req_pending);
1089e705c121SKalle Valo 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1090e705c121SKalle Valo 	}
1091e705c121SKalle Valo }
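
/*
 * Illustrative sketch (not part of the driver): the posting cadence
 * implemented above, reduced to plain arithmetic. Assuming the values
 * implied by the comments (RX_CLAIM_REQ_ALLOC == 8, RX_POST_REQ_ALLOC == 2:
 * "2 RBDs" plus "another 6 from pool"), a request is posted each time the
 * used count passes two beyond a multiple of eight, so a request still goes
 * out even after a failed claim. The helper name is hypothetical.
 */
static inline bool example_should_post_alloc_req(unsigned int used_count)
{
	return (used_count % 8) == 2;	/* posts at 2, 10, 18, ... */
}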
1092e705c121SKalle Valo 
1093e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
109478485054SSara Sharon 				struct iwl_rxq *rxq,
1095e705c121SKalle Valo 				struct iwl_rx_mem_buffer *rxb,
1096e705c121SKalle Valo 				bool emergency)
1097e705c121SKalle Valo {
1098e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1099b2a3b1c1SSara Sharon 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1100e705c121SKalle Valo 	bool page_stolen = false;
1101e705c121SKalle Valo 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1102e705c121SKalle Valo 	u32 offset = 0;
1103e705c121SKalle Valo 
1104e705c121SKalle Valo 	if (WARN_ON(!rxb))
1105e705c121SKalle Valo 		return;
1106e705c121SKalle Valo 
1107e705c121SKalle Valo 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1108e705c121SKalle Valo 
1109e705c121SKalle Valo 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1110e705c121SKalle Valo 		struct iwl_rx_packet *pkt;
1111e705c121SKalle Valo 		u16 sequence;
1112e705c121SKalle Valo 		bool reclaim;
1113e705c121SKalle Valo 		int index, cmd_index, len;
1114e705c121SKalle Valo 		struct iwl_rx_cmd_buffer rxcb = {
1115e705c121SKalle Valo 			._offset = offset,
1116e705c121SKalle Valo 			._rx_page_order = trans_pcie->rx_page_order,
1117e705c121SKalle Valo 			._page = rxb->page,
1118e705c121SKalle Valo 			._page_stolen = false,
1119e705c121SKalle Valo 			.truesize = max_len,
1120e705c121SKalle Valo 		};
1121e705c121SKalle Valo 
1122e705c121SKalle Valo 		pkt = rxb_addr(&rxcb);
1123e705c121SKalle Valo 
11243bfdee76SJohannes Berg 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
11253bfdee76SJohannes Berg 			IWL_DEBUG_RX(trans,
11263bfdee76SJohannes Berg 				     "Q %d: RB end marker at offset %d\n",
11273bfdee76SJohannes Berg 				     rxq->id, offset);
1128e705c121SKalle Valo 			break;
11293bfdee76SJohannes Berg 		}
1130e705c121SKalle Valo 
1131a395058eSJohannes Berg 		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1132a395058eSJohannes Berg 			FH_RSCSR_RXQ_POS != rxq->id,
1133a395058eSJohannes Berg 		     "frame on invalid queue - is on %d and indicates %d\n",
1134a395058eSJohannes Berg 		     rxq->id,
1135a395058eSJohannes Berg 		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1136a395058eSJohannes Berg 			FH_RSCSR_RXQ_POS);
1137ab2e696bSSara Sharon 
1138e705c121SKalle Valo 		IWL_DEBUG_RX(trans,
11393bfdee76SJohannes Berg 			     "Q %d: cmd at offset %d: %s (%.2x.%.2x, seq 0x%x)\n",
11403bfdee76SJohannes Berg 			     rxq->id, offset,
114139bdb17eSSharon Dvir 			     iwl_get_cmd_string(trans,
114239bdb17eSSharon Dvir 						iwl_cmd_id(pkt->hdr.cmd,
114339bdb17eSSharon Dvir 							   pkt->hdr.group_id,
114439bdb17eSSharon Dvir 							   0)),
114535177c99SSara Sharon 			     pkt->hdr.group_id, pkt->hdr.cmd,
114635177c99SSara Sharon 			     le16_to_cpu(pkt->hdr.sequence));
1147e705c121SKalle Valo 
1148e705c121SKalle Valo 		len = iwl_rx_packet_len(pkt);
1149e705c121SKalle Valo 		len += sizeof(u32); /* account for status word */
1150e705c121SKalle Valo 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1151e705c121SKalle Valo 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1152e705c121SKalle Valo 
1153e705c121SKalle Valo 		/* Reclaim a command buffer only if this packet is a response
1154e705c121SKalle Valo 		 *   to a (driver-originated) command.
1155e705c121SKalle Valo 		 * If the packet (e.g. Rx frame) originated from uCode,
1156e705c121SKalle Valo 		 *   there is no command buffer to reclaim.
1157e705c121SKalle Valo 		 * uCode should set the SEQ_RX_FRAME bit if ucode-originated,
1158e705c121SKalle Valo 		 *   but apparently a few don't get set; catch them here. */
1159e705c121SKalle Valo 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1160d8a130b0SJohannes Berg 		if (reclaim && !pkt->hdr.group_id) {
1161e705c121SKalle Valo 			int i;
1162e705c121SKalle Valo 
1163e705c121SKalle Valo 			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1164e705c121SKalle Valo 				if (trans_pcie->no_reclaim_cmds[i] ==
1165e705c121SKalle Valo 							pkt->hdr.cmd) {
1166e705c121SKalle Valo 					reclaim = false;
1167e705c121SKalle Valo 					break;
1168e705c121SKalle Valo 				}
1169e705c121SKalle Valo 			}
1170e705c121SKalle Valo 		}
1171e705c121SKalle Valo 
1172e705c121SKalle Valo 		sequence = le16_to_cpu(pkt->hdr.sequence);
1173e705c121SKalle Valo 		index = SEQ_TO_INDEX(sequence);
11744ecab561SEmmanuel Grumbach 		cmd_index = iwl_pcie_get_cmd_index(txq, index);
1175e705c121SKalle Valo 
1176bce97731SSara Sharon 		if (rxq->id == 0)
1177bce97731SSara Sharon 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1178bce97731SSara Sharon 				       &rxcb);
1179bce97731SSara Sharon 		else
1180bce97731SSara Sharon 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1181bce97731SSara Sharon 					   &rxcb, rxq->id);
1182e705c121SKalle Valo 
1183e705c121SKalle Valo 		if (reclaim) {
1184e705c121SKalle Valo 			kzfree(txq->entries[cmd_index].free_buf);
1185e705c121SKalle Valo 			txq->entries[cmd_index].free_buf = NULL;
1186e705c121SKalle Valo 		}
1187e705c121SKalle Valo 
1188e705c121SKalle Valo 		/*
1189e705c121SKalle Valo 		 * After here, we should always check rxcb._page_stolen,
1190e705c121SKalle Valo 		 * if it is true then one of the handlers took the page.
1191e705c121SKalle Valo 		 */
1192e705c121SKalle Valo 
1193e705c121SKalle Valo 		if (reclaim) {
1194e705c121SKalle Valo 			/* Invoke any callbacks, transfer the buffer to caller,
1195e705c121SKalle Valo 			 * and fire off the (possibly) blocking
1196e705c121SKalle Valo 			 * iwl_trans_send_cmd()
1197e705c121SKalle Valo 			 * as we reclaim the driver command queue */
1198e705c121SKalle Valo 			if (!rxcb._page_stolen)
1199e705c121SKalle Valo 				iwl_pcie_hcmd_complete(trans, &rxcb);
1200e705c121SKalle Valo 			else
1201e705c121SKalle Valo 				IWL_WARN(trans, "Claim null rxb?\n");
1202e705c121SKalle Valo 		}
1203e705c121SKalle Valo 
1204e705c121SKalle Valo 		page_stolen |= rxcb._page_stolen;
1205e705c121SKalle Valo 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1206e705c121SKalle Valo 	}
1207e705c121SKalle Valo 
1208e705c121SKalle Valo 	/* page was stolen from us -- free our reference */
1209e705c121SKalle Valo 	if (page_stolen) {
1210e705c121SKalle Valo 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1211e705c121SKalle Valo 		rxb->page = NULL;
1212e705c121SKalle Valo 	}
1213e705c121SKalle Valo 
1214e705c121SKalle Valo 	/* Reuse the page if possible. For notification packets and
1215e705c121SKalle Valo 	 * SKBs that fail to Rx correctly, add them back into the
1216e705c121SKalle Valo 	 * rx_free list for reuse later. */
1217e705c121SKalle Valo 	if (rxb->page != NULL) {
1218e705c121SKalle Valo 		rxb->page_dma =
1219e705c121SKalle Valo 			dma_map_page(trans->dev, rxb->page, 0,
1220e705c121SKalle Valo 				     PAGE_SIZE << trans_pcie->rx_page_order,
1221e705c121SKalle Valo 				     DMA_FROM_DEVICE);
1222e705c121SKalle Valo 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1223e705c121SKalle Valo 			/*
1224e705c121SKalle Valo 			 * free the page(s) as well to not break
1225e705c121SKalle Valo 			 * the invariant that the items on the used
1226e705c121SKalle Valo 			 * list have no page(s)
1227e705c121SKalle Valo 			 */
1228e705c121SKalle Valo 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1229e705c121SKalle Valo 			rxb->page = NULL;
1230e705c121SKalle Valo 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1231e705c121SKalle Valo 		} else {
1232e705c121SKalle Valo 			list_add_tail(&rxb->list, &rxq->rx_free);
1233e705c121SKalle Valo 			rxq->free_count++;
1234e705c121SKalle Valo 		}
1235e705c121SKalle Valo 	} else
1236e705c121SKalle Valo 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1237e705c121SKalle Valo }
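
/*
 * Illustrative sketch (not part of the driver): the offset arithmetic
 * behind the packet walk in iwl_pcie_rx_handle_rb() above. A single
 * receive buffer can carry several packets back to back; each packet
 * (its length plus the status word) is padded out to the alignment given
 * by FH_RSCSR_FRAME_ALIGN before the next one starts. The helper name and
 * the 64-byte alignment value used here are assumptions for illustration.
 */
static inline unsigned int example_next_pkt_offset(unsigned int offset,
						   unsigned int len)
{
	const unsigned int align = 64;	/* assumed FH_RSCSR_FRAME_ALIGN */

	return offset + ((len + align - 1) & ~(align - 1));
}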
1238e705c121SKalle Valo 
1239e705c121SKalle Valo /*
1240e705c121SKalle Valo  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1241e705c121SKalle Valo  */
12422e5d4a8fSHaim Dreyfuss static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1243e705c121SKalle Valo {
1244e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
12452e5d4a8fSHaim Dreyfuss 	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
1246d56daea4SSara Sharon 	u32 r, i, count = 0;
1247e705c121SKalle Valo 	bool emergency = false;
1248e705c121SKalle Valo 
1249e705c121SKalle Valo restart:
1250e705c121SKalle Valo 	spin_lock(&rxq->lock);
1251e705c121SKalle Valo 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1252e705c121SKalle Valo 	 * buffer that the driver may process (last buffer filled by ucode). */
12536aa7de05SMark Rutland 	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1254e705c121SKalle Valo 	i = rxq->read;
1255e705c121SKalle Valo 
12565eae443eSSara Sharon 	/* W/A 9000 device step A0 wrap-around bug */
12575eae443eSSara Sharon 	r &= (rxq->queue_size - 1);
12585eae443eSSara Sharon 
1259e705c121SKalle Valo 	/* Rx interrupt, but nothing sent from uCode */
1260e705c121SKalle Valo 	if (i == r)
12615eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1262e705c121SKalle Valo 
1263e705c121SKalle Valo 	while (i != r) {
1264e705c121SKalle Valo 		struct iwl_rx_mem_buffer *rxb;
1265e705c121SKalle Valo 
126696a6497bSSara Sharon 		if (unlikely(rxq->used_count == rxq->queue_size / 2))
1267e705c121SKalle Valo 			emergency = true;
1268e705c121SKalle Valo 
126996a6497bSSara Sharon 		if (trans->cfg->mq_rx_supported) {
127096a6497bSSara Sharon 			/*
127196a6497bSSara Sharon 			 * Each used_bd entry is 32 bits wide, but only the
127296a6497bSSara Sharon 			 * low 12 bits carry the vid
127396a6497bSSara Sharon 			 */
12745eae443eSSara Sharon 			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
127596a6497bSSara Sharon 
1276e25d65f2SSara Sharon 			if (WARN(!vid ||
1277e25d65f2SSara Sharon 				 vid > ARRAY_SIZE(trans_pcie->global_table),
1278e25d65f2SSara Sharon 				 "Invalid rxb index from HW %u\n", (u32)vid)) {
1279e25d65f2SSara Sharon 				iwl_force_nmi(trans);
12805eae443eSSara Sharon 				goto out;
1281e25d65f2SSara Sharon 			}
1282e25d65f2SSara Sharon 			rxb = trans_pcie->global_table[vid - 1];
1283b1753c62SSara Sharon 			if (WARN(rxb->invalid,
1284b1753c62SSara Sharon 				 "Invalid rxb from HW %u\n", (u32)vid)) {
1285b1753c62SSara Sharon 				iwl_force_nmi(trans);
1286b1753c62SSara Sharon 				goto out;
1287b1753c62SSara Sharon 			}
1288b1753c62SSara Sharon 			rxb->invalid = true;
128996a6497bSSara Sharon 		} else {
1290e705c121SKalle Valo 			rxb = rxq->queue[i];
1291e705c121SKalle Valo 			rxq->queue[i] = NULL;
129296a6497bSSara Sharon 		}
1293e705c121SKalle Valo 
12945eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
129578485054SSara Sharon 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1296e705c121SKalle Valo 
129796a6497bSSara Sharon 		i = (i + 1) & (rxq->queue_size - 1);
1298e705c121SKalle Valo 
1299d56daea4SSara Sharon 		/*
1300d56daea4SSara Sharon 		 * If RX_CLAIM_REQ_ALLOC rx buffers have been released,
1301d56daea4SSara Sharon 		 * try to claim the pre-allocated buffers from the allocator.
1302d56daea4SSara Sharon 		 * If they are not ready, we will try again next time.
1303d56daea4SSara Sharon 		 * There is no need to reschedule the work - the allocator
1304d56daea4SSara Sharon 		 * exits only on success
1305e705c121SKalle Valo 		 */
1306d56daea4SSara Sharon 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1307d56daea4SSara Sharon 			iwl_pcie_rx_allocator_get(trans, rxq);
1308e705c121SKalle Valo 
1309d56daea4SSara Sharon 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1310d56daea4SSara Sharon 			struct iwl_rb_allocator *rba = &trans_pcie->rba;
1311d56daea4SSara Sharon 
1312d56daea4SSara Sharon 			/* Add the remaining empty RBDs for allocator use */
1313d56daea4SSara Sharon 			spin_lock(&rba->lock);
1314d56daea4SSara Sharon 			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1315d56daea4SSara Sharon 			spin_unlock(&rba->lock);
1316d56daea4SSara Sharon 		} else if (emergency) {
1317e705c121SKalle Valo 			count++;
1318e705c121SKalle Valo 			if (count == 8) {
1319e705c121SKalle Valo 				count = 0;
132096a6497bSSara Sharon 				if (rxq->used_count < rxq->queue_size / 3)
1321e705c121SKalle Valo 					emergency = false;
1322e0e168dcSGregory Greenman 
1323e705c121SKalle Valo 				rxq->read = i;
1324e705c121SKalle Valo 				spin_unlock(&rxq->lock);
1325e0e168dcSGregory Greenman 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
132678485054SSara Sharon 				iwl_pcie_rxq_restock(trans, rxq);
1327e705c121SKalle Valo 				goto restart;
1328e705c121SKalle Valo 			}
1329e705c121SKalle Valo 		}
1330e0e168dcSGregory Greenman 	}
13315eae443eSSara Sharon out:
1332e705c121SKalle Valo 	/* Backtrack one entry */
1333e705c121SKalle Valo 	rxq->read = i;
1334e705c121SKalle Valo 	spin_unlock(&rxq->lock);
1335e705c121SKalle Valo 
1336e705c121SKalle Valo 	/*
1337e705c121SKalle Valo 	 * Handle the case where some RBDs were left unallocated while in
1338e705c121SKalle Valo 	 * emergency mode. Those RBDs sit on the used list but are not
1339e705c121SKalle Valo 	 * tracked by the queue's used_count, which counts allocator-owned
1340e705c121SKalle Valo 	 * RBDs. They must be allocated on exit: otherwise, when this
1341e705c121SKalle Valo 	 * function is called again it may not be in emergency mode, and
1342e705c121SKalle Valo 	 * they would be handed to the allocator with no tracking in the
1343e705c121SKalle Valo 	 * RBD allocator counters, so they would never be claimed back by
1344e705c121SKalle Valo 	 * the queue.
1345e705c121SKalle Valo 	 * By allocating them here, they land on the queue's free list and
1346e705c121SKalle Valo 	 * will be restocked by the next call of iwl_pcie_rxq_restock.
1347e705c121SKalle Valo 	 */
1348e705c121SKalle Valo 	if (unlikely(emergency && count))
134978485054SSara Sharon 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1350e705c121SKalle Valo 
1351bce97731SSara Sharon 	if (rxq->napi.poll)
1352bce97731SSara Sharon 		napi_gro_flush(&rxq->napi, false);
1353e0e168dcSGregory Greenman 
1354e0e168dcSGregory Greenman 	iwl_pcie_rxq_restock(trans, rxq);
1355e705c121SKalle Valo }
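
/*
 * Illustrative sketch (not part of the driver): the wrap-around used by
 * iwl_pcie_rx_handle() above. Because the queue size is a power of two,
 * advancing an index is a single AND with (size - 1) instead of a modulo;
 * the same mask is applied to the hardware's closed_rb_num as the step A0
 * workaround. The helper name is hypothetical.
 */
static inline u32 example_ring_advance(u32 i, u32 size)
{
	return (i + 1) & (size - 1);	/* size must be a power of two */
}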
1356e705c121SKalle Valo 
13572e5d4a8fSHaim Dreyfuss static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
13582e5d4a8fSHaim Dreyfuss {
13592e5d4a8fSHaim Dreyfuss 	u8 queue = entry->entry;
13602e5d4a8fSHaim Dreyfuss 	struct msix_entry *entries = entry - queue;
13612e5d4a8fSHaim Dreyfuss 
13622e5d4a8fSHaim Dreyfuss 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
13632e5d4a8fSHaim Dreyfuss }
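
/*
 * Illustrative sketch (not part of the driver): the pointer arithmetic
 * used by iwl_pcie_get_trans_pcie() above. Given a pointer to element N
 * of an array embedded in a larger struct, stepping back N elements
 * yields element 0, and container_of() then recovers the enclosing
 * struct. The struct and helper names here are hypothetical.
 */
struct example_outer {
	int cookie;
	struct msix_entry entries[4];
};

static inline struct example_outer *example_outer_of(struct msix_entry *e)
{
	struct msix_entry *first = e - e->entry;	/* back to entries[0] */

	return container_of(first, struct example_outer, entries[0]);
}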
13642e5d4a8fSHaim Dreyfuss 
13652e5d4a8fSHaim Dreyfuss static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
13662e5d4a8fSHaim Dreyfuss 				      struct msix_entry *entry)
13672e5d4a8fSHaim Dreyfuss {
13682e5d4a8fSHaim Dreyfuss 	/*
13692e5d4a8fSHaim Dreyfuss 	 * Before sending the interrupt the HW disables it to prevent
13702e5d4a8fSHaim Dreyfuss 	 * a nested interrupt. This is done by writing 1 to the corresponding
13712e5d4a8fSHaim Dreyfuss 	 * bit in the mask register. After handling the interrupt, it should be
13722e5d4a8fSHaim Dreyfuss 	 * re-enabled by clearing this bit. This register is defined as a
13732e5d4a8fSHaim Dreyfuss 	 * write-1-clear (W1C) register, meaning that a bit is cleared
13742e5d4a8fSHaim Dreyfuss 	 * by writing 1 to it.
13752e5d4a8fSHaim Dreyfuss 	 */
13767ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
13772e5d4a8fSHaim Dreyfuss }
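
/*
 * Illustrative sketch (not part of the driver): write-1-clear (W1C)
 * semantics as described above, modeled on a plain variable. A write
 * clears exactly the bits that are set in the written value and leaves
 * the others untouched. The helper name is hypothetical.
 */
static inline void example_w1c_write(u32 *reg, u32 val)
{
	*reg &= ~val;	/* every 1 written clears that bit; 0s are ignored */
}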
13782e5d4a8fSHaim Dreyfuss 
13792e5d4a8fSHaim Dreyfuss /*
13802e5d4a8fSHaim Dreyfuss  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
13812e5d4a8fSHaim Dreyfuss  * This interrupt handler should be used with RSS queues only.
13822e5d4a8fSHaim Dreyfuss  */
13832e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
13842e5d4a8fSHaim Dreyfuss {
13852e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
13862e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
13872e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
13882e5d4a8fSHaim Dreyfuss 
1389c42ff65dSJohannes Berg 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1390c42ff65dSJohannes Berg 
13915eae443eSSara Sharon 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
13925eae443eSSara Sharon 		return IRQ_NONE;
13935eae443eSSara Sharon 
13942e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
13952e5d4a8fSHaim Dreyfuss 
13962e5d4a8fSHaim Dreyfuss 	local_bh_disable();
13972e5d4a8fSHaim Dreyfuss 	iwl_pcie_rx_handle(trans, entry->entry);
13982e5d4a8fSHaim Dreyfuss 	local_bh_enable();
13992e5d4a8fSHaim Dreyfuss 
14002e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
14012e5d4a8fSHaim Dreyfuss 
14022e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
14032e5d4a8fSHaim Dreyfuss 
14042e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
14052e5d4a8fSHaim Dreyfuss }
14062e5d4a8fSHaim Dreyfuss 
1407e705c121SKalle Valo /*
1408e705c121SKalle Valo  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1409e705c121SKalle Valo  */
1410e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1411e705c121SKalle Valo {
1412e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1413e705c121SKalle Valo 	int i;
1414e705c121SKalle Valo 
1415e705c121SKalle Valo 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1416e705c121SKalle Valo 	if (trans->cfg->internal_wimax_coex &&
1417e705c121SKalle Valo 	    !trans->cfg->apmg_not_supported &&
1418e705c121SKalle Valo 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1419e705c121SKalle Valo 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1420e705c121SKalle Valo 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1421e705c121SKalle Valo 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1422e705c121SKalle Valo 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1423e705c121SKalle Valo 		iwl_op_mode_wimax_active(trans->op_mode);
1424e705c121SKalle Valo 		wake_up(&trans_pcie->wait_command_queue);
1425e705c121SKalle Valo 		return;
1426e705c121SKalle Valo 	}
1427e705c121SKalle Valo 
142813a3a390SSara Sharon 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
142913a3a390SSara Sharon 		if (!trans_pcie->txq[i])
143013a3a390SSara Sharon 			continue;
1431b2a3b1c1SSara Sharon 		del_timer(&trans_pcie->txq[i]->stuck_timer);
143213a3a390SSara Sharon 	}
1433e705c121SKalle Valo 
14347d75f32eSEmmanuel Grumbach 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
14357d75f32eSEmmanuel Grumbach 	 * before we wake up the command caller, to ensure a proper cleanup. */
14367d75f32eSEmmanuel Grumbach 	iwl_trans_fw_error(trans);
14377d75f32eSEmmanuel Grumbach 
1438e705c121SKalle Valo 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1439e705c121SKalle Valo 	wake_up(&trans_pcie->wait_command_queue);
1440e705c121SKalle Valo }
1441e705c121SKalle Valo 
1442e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1443e705c121SKalle Valo {
1444e705c121SKalle Valo 	u32 inta;
1445e705c121SKalle Valo 
1446e705c121SKalle Valo 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1447e705c121SKalle Valo 
1448e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1449e705c121SKalle Valo 
1450e705c121SKalle Valo 	/* Discover which interrupts are active/pending */
1451e705c121SKalle Valo 	inta = iwl_read32(trans, CSR_INT);
1452e705c121SKalle Valo 
1453e705c121SKalle Valo 	/* the thread will service interrupts and re-enable them */
1454e705c121SKalle Valo 	return inta;
1455e705c121SKalle Valo }
1456e705c121SKalle Valo 
1457e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1458e705c121SKalle Valo #define ICT_SHIFT	12
1459e705c121SKalle Valo #define ICT_SIZE	(1 << ICT_SHIFT)
1460e705c121SKalle Valo #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
1461e705c121SKalle Valo 
1462e705c121SKalle Valo /* Interrupt handler using the ICT table. With this mechanism the driver
1463e705c121SKalle Valo  * stops reading the INTA register to discover the device's interrupts,
1464e705c121SKalle Valo  * since reading that register is expensive. The device writes interrupts
1465e705c121SKalle Valo  * into the ICT DRAM table, increments the index and fires an interrupt to
1466e705c121SKalle Valo  * the driver. The driver ORs all ICT table entries from the current index
1467e705c121SKalle Valo  * up to the first entry with value 0; the result is the interrupt to
1468e705c121SKalle Valo  * service. The driver then sets the entries back to 0 and updates the index.
1469e705c121SKalle Valo  */
1470e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1471e705c121SKalle Valo {
1472e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1473e705c121SKalle Valo 	u32 inta;
1474e705c121SKalle Valo 	u32 val = 0;
1475e705c121SKalle Valo 	u32 read;
1476e705c121SKalle Valo 
1477e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1478e705c121SKalle Valo 
1479e705c121SKalle Valo 	/* Ignore the interrupt if there's nothing in the NIC to service.
1480e705c121SKalle Valo 	 * This may be due to an IRQ shared with another device,
1481e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC. */
1482e705c121SKalle Valo 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1483e705c121SKalle Valo 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1484e705c121SKalle Valo 	if (!read)
1485e705c121SKalle Valo 		return 0;
1486e705c121SKalle Valo 
1487e705c121SKalle Valo 	/*
1488e705c121SKalle Valo 	 * Collect all entries up to the first 0, starting from ict_index;
1489e705c121SKalle Valo 	 * note we already read at ict_index.
1490e705c121SKalle Valo 	 */
1491e705c121SKalle Valo 	do {
1492e705c121SKalle Valo 		val |= read;
1493e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1494e705c121SKalle Valo 				trans_pcie->ict_index, read);
1495e705c121SKalle Valo 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1496e705c121SKalle Valo 		trans_pcie->ict_index =
1497e705c121SKalle Valo 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1498e705c121SKalle Valo 
1499e705c121SKalle Valo 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1500e705c121SKalle Valo 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1501e705c121SKalle Valo 					   read);
1502e705c121SKalle Valo 	} while (read);
1503e705c121SKalle Valo 
1504e705c121SKalle Valo 	/* We should not get this value, just ignore it. */
1505e705c121SKalle Valo 	if (val == 0xffffffff)
1506e705c121SKalle Valo 		val = 0;
1507e705c121SKalle Valo 
1508e705c121SKalle Valo 	/*
1509e705c121SKalle Valo 	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
1510e705c121SKalle Valo 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1511e705c121SKalle Valo 	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
1512e705c121SKalle Valo 	 * so we use them to decide on the real state of the Rx bit.
1513e705c121SKalle Valo 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1514e705c121SKalle Valo 	 */
1515e705c121SKalle Valo 	if (val & 0xC0000)
1516e705c121SKalle Valo 		val |= 0x8000;
1517e705c121SKalle Valo 
1518e705c121SKalle Valo 	inta = (0xff & val) | ((0xff00 & val) << 16);
1519e705c121SKalle Valo 	return inta;
1520e705c121SKalle Valo }
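
/*
 * Illustrative sketch (not part of the driver): how the collapsed ICT
 * value is unpacked above. Each ICT entry carries CSR_INT bits 0-7 in
 * its low byte and CSR_INT bits 24-31 in its second byte; the Rx
 * workaround mirrors bits 18/19 into bit 15 before unpacking, so that
 * bit 15 lands on bit 31 (the FH Rx cause). The helper name is
 * hypothetical.
 */
static inline u32 example_ict_to_inta(u32 val)
{
	if (val & 0xC0000)		/* Rx bit lost to the h/w bug */
		val |= 0x8000;

	return (0xff & val) | ((0xff00 & val) << 16);
}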
1521e705c121SKalle Valo 
1522fa4de7f7SJohannes Berg void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
15233a6e168bSJohannes Berg {
15243a6e168bSJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
15253a6e168bSJohannes Berg 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1526326477e4SJohannes Berg 	bool hw_rfkill, prev, report;
15273a6e168bSJohannes Berg 
15283a6e168bSJohannes Berg 	mutex_lock(&trans_pcie->mutex);
1529326477e4SJohannes Berg 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
15303a6e168bSJohannes Berg 	hw_rfkill = iwl_is_rfkill_set(trans);
1531326477e4SJohannes Berg 	if (hw_rfkill) {
1532326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1533326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_HW, &trans->status);
1534326477e4SJohannes Berg 	}
1535326477e4SJohannes Berg 	if (trans_pcie->opmode_down)
1536326477e4SJohannes Berg 		report = hw_rfkill;
1537326477e4SJohannes Berg 	else
1538326477e4SJohannes Berg 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
15393a6e168bSJohannes Berg 
15403a6e168bSJohannes Berg 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
15413a6e168bSJohannes Berg 		 hw_rfkill ? "disable radio" : "enable radio");
15423a6e168bSJohannes Berg 
15433a6e168bSJohannes Berg 	isr_stats->rfkill++;
15443a6e168bSJohannes Berg 
1545326477e4SJohannes Berg 	if (prev != report)
1546326477e4SJohannes Berg 		iwl_trans_pcie_rf_kill(trans, report);
15473a6e168bSJohannes Berg 	mutex_unlock(&trans_pcie->mutex);
15483a6e168bSJohannes Berg 
15493a6e168bSJohannes Berg 	if (hw_rfkill) {
15503a6e168bSJohannes Berg 		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
15513a6e168bSJohannes Berg 				       &trans->status))
15523a6e168bSJohannes Berg 			IWL_DEBUG_RF_KILL(trans,
15533a6e168bSJohannes Berg 					  "Rfkill while SYNC HCMD in flight\n");
15543a6e168bSJohannes Berg 		wake_up(&trans_pcie->wait_command_queue);
15553a6e168bSJohannes Berg 	} else {
1556326477e4SJohannes Berg 		clear_bit(STATUS_RFKILL_HW, &trans->status);
1557326477e4SJohannes Berg 		if (trans_pcie->opmode_down)
1558326477e4SJohannes Berg 			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
15593a6e168bSJohannes Berg 	}
15603a6e168bSJohannes Berg }
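
/*
 * Illustrative sketch (not part of the driver): the report decision in
 * iwl_pcie_handle_rfkill_irq() above. While the op mode is down, only
 * the hardware switch state is reported; once it is up, the sticky
 * STATUS_RFKILL_OPMODE bit is reported instead, and the op mode is
 * notified only when the reported value actually changed. The helper
 * name is hypothetical.
 */
static inline bool example_rfkill_report(bool opmode_down, bool hw_rfkill,
					 bool opmode_bit)
{
	return opmode_down ? hw_rfkill : opmode_bit;
}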
15613a6e168bSJohannes Berg 
1562e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1563e705c121SKalle Valo {
1564e705c121SKalle Valo 	struct iwl_trans *trans = dev_id;
1565e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1566e705c121SKalle Valo 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1567e705c121SKalle Valo 	u32 inta = 0;
1568e705c121SKalle Valo 	u32 handled = 0;
1569e705c121SKalle Valo 
1570e705c121SKalle Valo 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1571e705c121SKalle Valo 
1572e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1573e705c121SKalle Valo 
1574e705c121SKalle Valo 	/* If the dram interrupt table is not set yet,
1575e705c121SKalle Valo 	 * use the legacy interrupt path.
1576e705c121SKalle Valo 	 */
1577e705c121SKalle Valo 	if (likely(trans_pcie->use_ict))
1578e705c121SKalle Valo 		inta = iwl_pcie_int_cause_ict(trans);
1579e705c121SKalle Valo 	else
1580e705c121SKalle Valo 		inta = iwl_pcie_int_cause_non_ict(trans);
1581e705c121SKalle Valo 
1582e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1583e705c121SKalle Valo 		IWL_DEBUG_ISR(trans,
1584e705c121SKalle Valo 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1585e705c121SKalle Valo 			      inta, trans_pcie->inta_mask,
1586e705c121SKalle Valo 			      iwl_read32(trans, CSR_INT_MASK),
1587e705c121SKalle Valo 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1588e705c121SKalle Valo 		if (inta & (~trans_pcie->inta_mask))
1589e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1590e705c121SKalle Valo 				      "We got a masked interrupt (0x%08x)\n",
1591e705c121SKalle Valo 				      inta & (~trans_pcie->inta_mask));
1592e705c121SKalle Valo 	}
1593e705c121SKalle Valo 
1594e705c121SKalle Valo 	inta &= trans_pcie->inta_mask;
1595e705c121SKalle Valo 
1596e705c121SKalle Valo 	/*
1597e705c121SKalle Valo 	 * Ignore the interrupt if there's nothing in the NIC to service.
1598e705c121SKalle Valo 	 * This may be due to an IRQ shared with another device,
1599e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC.
1600e705c121SKalle Valo 	 */
1601e705c121SKalle Valo 	if (unlikely(!inta)) {
1602e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1603e705c121SKalle Valo 		/*
1604e705c121SKalle Valo 		 * Re-enable interrupts here since we don't
1605e705c121SKalle Valo 		 * have anything to service
1606e705c121SKalle Valo 		 */
1607e705c121SKalle Valo 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1608f16c3ebfSEmmanuel Grumbach 			_iwl_enable_interrupts(trans);
1609e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1610e705c121SKalle Valo 		lock_map_release(&trans->sync_cmd_lockdep_map);
1611e705c121SKalle Valo 		return IRQ_NONE;
1612e705c121SKalle Valo 	}
1613e705c121SKalle Valo 
1614e705c121SKalle Valo 	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1615e705c121SKalle Valo 		/*
1616e705c121SKalle Valo 		 * Hardware disappeared. It might have
1617e705c121SKalle Valo 		 * already raised an interrupt.
1618e705c121SKalle Valo 		 */
1619e705c121SKalle Valo 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1620e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1621e705c121SKalle Valo 		goto out;
1622e705c121SKalle Valo 	}
1623e705c121SKalle Valo 
1624e705c121SKalle Valo 	/* Ack/clear/reset pending uCode interrupts.
1625e705c121SKalle Valo 	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1626e705c121SKalle Valo 	 */
1627e705c121SKalle Valo 	/* There is a hardware bug in the interrupt mask function that some
1628e705c121SKalle Valo 	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1629e705c121SKalle Valo 	 * they are disabled in the CSR_INT_MASK register. Furthermore the
1630e705c121SKalle Valo 	 * ICT interrupt handling mechanism has another bug that might cause
1631e705c121SKalle Valo 	 * these unmasked interrupts to fail to be detected. We work around the
1632e705c121SKalle Valo 	 * hardware bugs here by ACKing all the possible interrupts so that
1633e705c121SKalle Valo 	 * interrupt coalescing can still be achieved.
1634e705c121SKalle Valo 	 */
1635e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1636e705c121SKalle Valo 
1637e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR))
1638e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1639e705c121SKalle Valo 			      inta, iwl_read32(trans, CSR_INT_MASK));
1640e705c121SKalle Valo 
1641e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1642e705c121SKalle Valo 
1643e705c121SKalle Valo 	/* Now service all interrupt bits discovered above. */
1644e705c121SKalle Valo 	if (inta & CSR_INT_BIT_HW_ERR) {
1645e705c121SKalle Valo 		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1646e705c121SKalle Valo 
1647e705c121SKalle Valo 		/* Tell the device to stop sending interrupts */
1648e705c121SKalle Valo 		iwl_disable_interrupts(trans);
1649e705c121SKalle Valo 
1650e705c121SKalle Valo 		isr_stats->hw++;
1651e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1652e705c121SKalle Valo 
1653e705c121SKalle Valo 		handled |= CSR_INT_BIT_HW_ERR;
1654e705c121SKalle Valo 
1655e705c121SKalle Valo 		goto out;
1656e705c121SKalle Valo 	}
1657e705c121SKalle Valo 
1658e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1659e705c121SKalle Valo 		/* NIC fires this, but we don't use it, redundant with WAKEUP */
1660e705c121SKalle Valo 		if (inta & CSR_INT_BIT_SCD) {
1661e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1662e705c121SKalle Valo 				      "Scheduler finished transmitting the frame/frames.\n");
1663e705c121SKalle Valo 			isr_stats->sch++;
1664e705c121SKalle Valo 		}
1665e705c121SKalle Valo 
1666e705c121SKalle Valo 		/* Alive notification via Rx interrupt will do the real work */
1667e705c121SKalle Valo 		if (inta & CSR_INT_BIT_ALIVE) {
1668e705c121SKalle Valo 			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1669e705c121SKalle Valo 			isr_stats->alive++;
1670eda50cdeSSara Sharon 			if (trans->cfg->gen2) {
1671eda50cdeSSara Sharon 				/*
1672eda50cdeSSara Sharon 				 * We can restock, since firmware configured
1673eda50cdeSSara Sharon 				 * the RFH
1674eda50cdeSSara Sharon 				 */
1675eda50cdeSSara Sharon 				iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1676eda50cdeSSara Sharon 			}
1677e705c121SKalle Valo 		}
1678e705c121SKalle Valo 	}
1679e705c121SKalle Valo 
1680e705c121SKalle Valo 	/* Safely ignore these bits for debug checks below */
1681e705c121SKalle Valo 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1682e705c121SKalle Valo 
1683e705c121SKalle Valo 	/* HW RF KILL switch toggled */
1684e705c121SKalle Valo 	if (inta & CSR_INT_BIT_RF_KILL) {
16853a6e168bSJohannes Berg 		iwl_pcie_handle_rfkill_irq(trans);
1686e705c121SKalle Valo 		handled |= CSR_INT_BIT_RF_KILL;
1687e705c121SKalle Valo 	}
1688e705c121SKalle Valo 
1689e705c121SKalle Valo 	/* Chip got too hot and stopped itself */
1690e705c121SKalle Valo 	if (inta & CSR_INT_BIT_CT_KILL) {
1691e705c121SKalle Valo 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1692e705c121SKalle Valo 		isr_stats->ctkill++;
1693e705c121SKalle Valo 		handled |= CSR_INT_BIT_CT_KILL;
1694e705c121SKalle Valo 	}
1695e705c121SKalle Valo 
1696e705c121SKalle Valo 	/* Error detected by uCode */
1697e705c121SKalle Valo 	if (inta & CSR_INT_BIT_SW_ERR) {
1698e705c121SKalle Valo 		IWL_ERR(trans, "Microcode SW error detected. "
1699e705c121SKalle Valo 			"Restarting 0x%X.\n", inta);
1700e705c121SKalle Valo 		isr_stats->sw++;
1701e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1702e705c121SKalle Valo 		handled |= CSR_INT_BIT_SW_ERR;
1703e705c121SKalle Valo 	}
1704e705c121SKalle Valo 
1705e705c121SKalle Valo 	/* uCode wakes up after power-down sleep */
1706e705c121SKalle Valo 	if (inta & CSR_INT_BIT_WAKEUP) {
1707e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1708e705c121SKalle Valo 		iwl_pcie_rxq_check_wrptr(trans);
1709e705c121SKalle Valo 		iwl_pcie_txq_check_wrptrs(trans);
1710e705c121SKalle Valo 
1711e705c121SKalle Valo 		isr_stats->wakeup++;
1712e705c121SKalle Valo 
1713e705c121SKalle Valo 		handled |= CSR_INT_BIT_WAKEUP;
1714e705c121SKalle Valo 	}
1715e705c121SKalle Valo 
1716e705c121SKalle Valo 	/* All uCode command responses, including Tx command responses,
1717e705c121SKalle Valo 	 * Rx "responses" (frame-received notification), and other
1718e705c121SKalle Valo 	 * notifications from uCode come through here */
1719e705c121SKalle Valo 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1720e705c121SKalle Valo 		    CSR_INT_BIT_RX_PERIODIC)) {
1721e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1722e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1723e705c121SKalle Valo 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1724e705c121SKalle Valo 			iwl_write32(trans, CSR_FH_INT_STATUS,
1725e705c121SKalle Valo 					CSR_FH_INT_RX_MASK);
1726e705c121SKalle Valo 		}
1727e705c121SKalle Valo 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1728e705c121SKalle Valo 			handled |= CSR_INT_BIT_RX_PERIODIC;
1729e705c121SKalle Valo 			iwl_write32(trans,
1730e705c121SKalle Valo 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1731e705c121SKalle Valo 		}
1732e705c121SKalle Valo 		/* Sending an RX interrupt requires many steps to be done in
1733e705c121SKalle Valo 		 * the device:
1734e705c121SKalle Valo 		 * 1- write interrupt to current index in ICT table.
1735e705c121SKalle Valo 		 * 2- dma RX frame.
1736e705c121SKalle Valo 		 * 3- update RX shared data to indicate last write index.
1737e705c121SKalle Valo 		 * 4- send interrupt.
1738e705c121SKalle Valo 		 * This could lead to an RX race: the driver could receive an
1739e705c121SKalle Valo 		 * RX interrupt before the shared data reflects the changes;
1740e705c121SKalle Valo 		 * the periodic interrupt will detect any dangling Rx activity.
1741e705c121SKalle Valo 		 */
1742e705c121SKalle Valo 
1743e705c121SKalle Valo 		/* Disable periodic interrupt; we use it as just a one-shot. */
1744e705c121SKalle Valo 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
1745e705c121SKalle Valo 			    CSR_INT_PERIODIC_DIS);
1746e705c121SKalle Valo 
1747e705c121SKalle Valo 		/*
1748e705c121SKalle Valo 		 * Enable periodic interrupt in 8 msec only if we received
1749e705c121SKalle Valo 		 * real RX interrupt (instead of just periodic int), to catch
1750e705c121SKalle Valo 		 * any dangling Rx interrupt.  If it was just the periodic
1751e705c121SKalle Valo 		 * interrupt, there was no dangling Rx activity, and no need
1752e705c121SKalle Valo 		 * to extend the periodic interrupt; one-shot is enough.
1753e705c121SKalle Valo 		 */
1754e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1755e705c121SKalle Valo 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
1756e705c121SKalle Valo 				   CSR_INT_PERIODIC_ENA);
1757e705c121SKalle Valo 
1758e705c121SKalle Valo 		isr_stats->rx++;
1759e705c121SKalle Valo 
1760e705c121SKalle Valo 		local_bh_disable();
17612e5d4a8fSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 0);
1762e705c121SKalle Valo 		local_bh_enable();
1763e705c121SKalle Valo 	}
1764e705c121SKalle Valo 
1765e705c121SKalle Valo 	/* This "Tx" DMA channel is used only for loading uCode */
1766e705c121SKalle Valo 	if (inta & CSR_INT_BIT_FH_TX) {
1767e705c121SKalle Valo 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1768e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1769e705c121SKalle Valo 		isr_stats->tx++;
1770e705c121SKalle Valo 		handled |= CSR_INT_BIT_FH_TX;
1771e705c121SKalle Valo 		/* Wake up uCode load routine, now that load is complete */
1772e705c121SKalle Valo 		trans_pcie->ucode_write_complete = true;
1773e705c121SKalle Valo 		wake_up(&trans_pcie->ucode_write_waitq);
1774e705c121SKalle Valo 	}
1775e705c121SKalle Valo 
1776e705c121SKalle Valo 	if (inta & ~handled) {
1777e705c121SKalle Valo 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1778e705c121SKalle Valo 		isr_stats->unhandled++;
1779e705c121SKalle Valo 	}
1780e705c121SKalle Valo 
1781e705c121SKalle Valo 	if (inta & ~(trans_pcie->inta_mask)) {
1782e705c121SKalle Valo 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1783e705c121SKalle Valo 			 inta & ~trans_pcie->inta_mask);
1784e705c121SKalle Valo 	}
1785e705c121SKalle Valo 
1786f16c3ebfSEmmanuel Grumbach 	spin_lock(&trans_pcie->irq_lock);
1787a6bd005fSEmmanuel Grumbach 	/* only re-enable all interrupts if disabled by irq */
1788f16c3ebfSEmmanuel Grumbach 	if (test_bit(STATUS_INT_ENABLED, &trans->status))
1789f16c3ebfSEmmanuel Grumbach 		_iwl_enable_interrupts(trans);
1790f16c3ebfSEmmanuel Grumbach 	/* we are loading the firmware, enable FH_TX interrupt only */
1791f16c3ebfSEmmanuel Grumbach 	else if (handled & CSR_INT_BIT_FH_TX)
1792f16c3ebfSEmmanuel Grumbach 		iwl_enable_fw_load_int(trans);
1793e705c121SKalle Valo 	/* Re-enable RF_KILL if it occurred */
1794e705c121SKalle Valo 	else if (handled & CSR_INT_BIT_RF_KILL)
1795e705c121SKalle Valo 		iwl_enable_rfkill_int(trans);
1796f16c3ebfSEmmanuel Grumbach 	spin_unlock(&trans_pcie->irq_lock);
1797e705c121SKalle Valo 
1798e705c121SKalle Valo out:
1799e705c121SKalle Valo 	lock_map_release(&trans->sync_cmd_lockdep_map);
1800e705c121SKalle Valo 	return IRQ_HANDLED;
1801e705c121SKalle Valo }
1802e705c121SKalle Valo 
1803e705c121SKalle Valo /******************************************************************************
1804e705c121SKalle Valo  *
1805e705c121SKalle Valo  * ICT functions
1806e705c121SKalle Valo  *
1807e705c121SKalle Valo  ******************************************************************************/
1808e705c121SKalle Valo 
1809e705c121SKalle Valo /* Free dram table */
1810e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans)
1811e705c121SKalle Valo {
1812e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1813e705c121SKalle Valo 
1814e705c121SKalle Valo 	if (trans_pcie->ict_tbl) {
1815e705c121SKalle Valo 		dma_free_coherent(trans->dev, ICT_SIZE,
1816e705c121SKalle Valo 				  trans_pcie->ict_tbl,
1817e705c121SKalle Valo 				  trans_pcie->ict_tbl_dma);
1818e705c121SKalle Valo 		trans_pcie->ict_tbl = NULL;
1819e705c121SKalle Valo 		trans_pcie->ict_tbl_dma = 0;
1820e705c121SKalle Valo 	}
1821e705c121SKalle Valo }
1822e705c121SKalle Valo 
1823e705c121SKalle Valo /*
1824e705c121SKalle Valo  * Allocate the dram shared table; it is an aligned memory
1825e705c121SKalle Valo  * block of ICT_SIZE.
1826e705c121SKalle Valo  * Also reset all data related to ICT table interrupts.
1827e705c121SKalle Valo  */
1828e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1829e705c121SKalle Valo {
1830e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1831e705c121SKalle Valo 
1832e705c121SKalle Valo 	trans_pcie->ict_tbl =
1833e705c121SKalle Valo 		dma_zalloc_coherent(trans->dev, ICT_SIZE,
1834e705c121SKalle Valo 				   &trans_pcie->ict_tbl_dma,
1835e705c121SKalle Valo 				   GFP_KERNEL);
1836e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1837e705c121SKalle Valo 		return -ENOMEM;
1838e705c121SKalle Valo 
1839e705c121SKalle Valo 	/* just an API sanity check ... it is guaranteed to be aligned */
1840e705c121SKalle Valo 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1841e705c121SKalle Valo 		iwl_pcie_free_ict(trans);
1842e705c121SKalle Valo 		return -EINVAL;
1843e705c121SKalle Valo 	}
1844e705c121SKalle Valo 
1845e705c121SKalle Valo 	return 0;
1846e705c121SKalle Valo }
1847e705c121SKalle Valo 
1848e705c121SKalle Valo /* Device is going up: inform it about using the ICT interrupt table,
1849e705c121SKalle Valo  * and tell the driver to start using ICT interrupts.
1850e705c121SKalle Valo  */
1851e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans)
1852e705c121SKalle Valo {
1853e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1854e705c121SKalle Valo 	u32 val;
1855e705c121SKalle Valo 
1856e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1857e705c121SKalle Valo 		return;
1858e705c121SKalle Valo 
1859e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1860f16c3ebfSEmmanuel Grumbach 	_iwl_disable_interrupts(trans);
1861e705c121SKalle Valo 
1862e705c121SKalle Valo 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1863e705c121SKalle Valo 
1864e705c121SKalle Valo 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
1865e705c121SKalle Valo 
1866e705c121SKalle Valo 	val |= CSR_DRAM_INT_TBL_ENABLE |
1867e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
1868e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
1869e705c121SKalle Valo 
1870e705c121SKalle Valo 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
1871e705c121SKalle Valo 
1872e705c121SKalle Valo 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1873e705c121SKalle Valo 	trans_pcie->use_ict = true;
1874e705c121SKalle Valo 	trans_pcie->ict_index = 0;
1875e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1876f16c3ebfSEmmanuel Grumbach 	_iwl_enable_interrupts(trans);
1877e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1878e705c121SKalle Valo }
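
/*
 * Illustrative sketch (not part of the driver): the register value
 * composed in iwl_pcie_reset_ict() above. The ICT table is ICT_SIZE
 * (4 KiB) aligned, so its DMA address shifted right by ICT_SHIFT (12)
 * fits in the low bits of CSR_DRAM_INT_TBL_REG alongside the enable and
 * wrap-check flags. The helper name is hypothetical.
 */
static inline u32 example_ict_reg_val(dma_addr_t tbl_dma, u32 flags)
{
	return (u32)(tbl_dma >> 12) | flags;
}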
1879e705c121SKalle Valo 
1880e705c121SKalle Valo /* Device is going down: disable ICT interrupt usage */
1881e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans)
1882e705c121SKalle Valo {
1883e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1884e705c121SKalle Valo 
1885e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1886e705c121SKalle Valo 	trans_pcie->use_ict = false;
1887e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1888e705c121SKalle Valo }
1889e705c121SKalle Valo 
1890e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data)
1891e705c121SKalle Valo {
1892e705c121SKalle Valo 	struct iwl_trans *trans = data;
1893e705c121SKalle Valo 
1894e705c121SKalle Valo 	if (!trans)
1895e705c121SKalle Valo 		return IRQ_NONE;
1896e705c121SKalle Valo 
1897e705c121SKalle Valo 	/* Disable (but don't clear!) interrupts here to avoid
1898e705c121SKalle Valo 	 * back-to-back ISRs and sporadic interrupts from our NIC.
1899e705c121SKalle Valo 	 * If we have something to service, the tasklet will re-enable ints.
1900e705c121SKalle Valo 	 * If we *don't* have something, we'll re-enable before leaving here.
1901e705c121SKalle Valo 	 */
1902e705c121SKalle Valo 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1903e705c121SKalle Valo 
1904e705c121SKalle Valo 	return IRQ_WAKE_THREAD;
1905e705c121SKalle Valo }
19062e5d4a8fSHaim Dreyfuss 
19072e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
19082e5d4a8fSHaim Dreyfuss {
19092e5d4a8fSHaim Dreyfuss 	return IRQ_WAKE_THREAD;
19102e5d4a8fSHaim Dreyfuss }
19112e5d4a8fSHaim Dreyfuss 
19122e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
19132e5d4a8fSHaim Dreyfuss {
19142e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
19152e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
19162e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
191746167a8fSColin Ian King 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
19182e5d4a8fSHaim Dreyfuss 	u32 inta_fh, inta_hw;
19192e5d4a8fSHaim Dreyfuss 
19202e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
19212e5d4a8fSHaim Dreyfuss 
19222e5d4a8fSHaim Dreyfuss 	spin_lock(&trans_pcie->irq_lock);
19237ef3dd26SHaim Dreyfuss 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
19247ef3dd26SHaim Dreyfuss 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
19252e5d4a8fSHaim Dreyfuss 	/*
19262e5d4a8fSHaim Dreyfuss 	 * Clear the cause registers to avoid handling the same cause again.
19272e5d4a8fSHaim Dreyfuss 	 */
19287ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
19297ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
19302e5d4a8fSHaim Dreyfuss 	spin_unlock(&trans_pcie->irq_lock);
19312e5d4a8fSHaim Dreyfuss 
1932c42ff65dSJohannes Berg 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
1933c42ff65dSJohannes Berg 
19342e5d4a8fSHaim Dreyfuss 	if (unlikely(!(inta_fh | inta_hw))) {
19352e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
19362e5d4a8fSHaim Dreyfuss 		lock_map_release(&trans->sync_cmd_lockdep_map);
19372e5d4a8fSHaim Dreyfuss 		return IRQ_NONE;
19382e5d4a8fSHaim Dreyfuss 	}
19392e5d4a8fSHaim Dreyfuss 
19402e5d4a8fSHaim Dreyfuss 	if (iwl_have_debug_level(IWL_DL_ISR))
19412e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
19422e5d4a8fSHaim Dreyfuss 			      inta_fh,
19432e5d4a8fSHaim Dreyfuss 			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
19442e5d4a8fSHaim Dreyfuss 
1945496d83caSHaim Dreyfuss 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
1946496d83caSHaim Dreyfuss 	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
1947496d83caSHaim Dreyfuss 		local_bh_disable();
1948496d83caSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 0);
1949496d83caSHaim Dreyfuss 		local_bh_enable();
1950496d83caSHaim Dreyfuss 	}
1951496d83caSHaim Dreyfuss 
1952496d83caSHaim Dreyfuss 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
1953496d83caSHaim Dreyfuss 	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
1954496d83caSHaim Dreyfuss 		local_bh_disable();
1955496d83caSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 1);
1956496d83caSHaim Dreyfuss 		local_bh_enable();
1957496d83caSHaim Dreyfuss 	}
1958496d83caSHaim Dreyfuss 
19592e5d4a8fSHaim Dreyfuss 	/* This "Tx" DMA channel is used only for loading uCode */
19602e5d4a8fSHaim Dreyfuss 	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
19612e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
19622e5d4a8fSHaim Dreyfuss 		isr_stats->tx++;
19632e5d4a8fSHaim Dreyfuss 		/*
19642e5d4a8fSHaim Dreyfuss 		 * Wake up uCode load routine,
19652e5d4a8fSHaim Dreyfuss 		 * now that load is complete
19662e5d4a8fSHaim Dreyfuss 		 */
19672e5d4a8fSHaim Dreyfuss 		trans_pcie->ucode_write_complete = true;
19682e5d4a8fSHaim Dreyfuss 		wake_up(&trans_pcie->ucode_write_waitq);
19692e5d4a8fSHaim Dreyfuss 	}
19702e5d4a8fSHaim Dreyfuss 
19712e5d4a8fSHaim Dreyfuss 	/* Error detected by uCode */
19722e5d4a8fSHaim Dreyfuss 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
19732e5d4a8fSHaim Dreyfuss 	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
19742e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans,
19752e5d4a8fSHaim Dreyfuss 			"Microcode SW error detected. Restarting 0x%X.\n",
19762e5d4a8fSHaim Dreyfuss 			inta_fh);
19772e5d4a8fSHaim Dreyfuss 		isr_stats->sw++;
19782e5d4a8fSHaim Dreyfuss 		iwl_pcie_irq_handle_error(trans);
19792e5d4a8fSHaim Dreyfuss 	}
19802e5d4a8fSHaim Dreyfuss 
19812e5d4a8fSHaim Dreyfuss 	/* After checking FH register check HW register */
19822e5d4a8fSHaim Dreyfuss 	if (iwl_have_debug_level(IWL_DL_ISR))
19832e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans,
19842e5d4a8fSHaim Dreyfuss 			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
19852e5d4a8fSHaim Dreyfuss 			      inta_hw,
19862e5d4a8fSHaim Dreyfuss 			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
19872e5d4a8fSHaim Dreyfuss 
19882e5d4a8fSHaim Dreyfuss 	/* Alive notification via Rx interrupt will do the real work */
19892e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
19902e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
19912e5d4a8fSHaim Dreyfuss 		isr_stats->alive++;
1992eda50cdeSSara Sharon 		if (trans->cfg->gen2) {
1993eda50cdeSSara Sharon 			/* We can restock, since firmware configured the RFH */
1994eda50cdeSSara Sharon 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1995eda50cdeSSara Sharon 		}
19962e5d4a8fSHaim Dreyfuss 	}
19972e5d4a8fSHaim Dreyfuss 
19982e5d4a8fSHaim Dreyfuss 	/* uCode wakes up after power-down sleep */
19992e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
20002e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
20012e5d4a8fSHaim Dreyfuss 		iwl_pcie_rxq_check_wrptr(trans);
20022e5d4a8fSHaim Dreyfuss 		iwl_pcie_txq_check_wrptrs(trans);
20032e5d4a8fSHaim Dreyfuss 
20042e5d4a8fSHaim Dreyfuss 		isr_stats->wakeup++;
20052e5d4a8fSHaim Dreyfuss 	}
20062e5d4a8fSHaim Dreyfuss 
20072e5d4a8fSHaim Dreyfuss 	/* Chip got too hot and stopped itself */
20082e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
20092e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
20102e5d4a8fSHaim Dreyfuss 		isr_stats->ctkill++;
20112e5d4a8fSHaim Dreyfuss 	}
20122e5d4a8fSHaim Dreyfuss 
20132e5d4a8fSHaim Dreyfuss 	/* HW RF KILL switch toggled */
20143a6e168bSJohannes Berg 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
20153a6e168bSJohannes Berg 		iwl_pcie_handle_rfkill_irq(trans);
20162e5d4a8fSHaim Dreyfuss 
20172e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
20182e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans,
20192e5d4a8fSHaim Dreyfuss 			"Hardware error detected. Restarting.\n");
20202e5d4a8fSHaim Dreyfuss 
20212e5d4a8fSHaim Dreyfuss 		isr_stats->hw++;
20222e5d4a8fSHaim Dreyfuss 		iwl_pcie_irq_handle_error(trans);
20232e5d4a8fSHaim Dreyfuss 	}
20242e5d4a8fSHaim Dreyfuss 
20252e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
20262e5d4a8fSHaim Dreyfuss 
20272e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
20282e5d4a8fSHaim Dreyfuss 
20292e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
20302e5d4a8fSHaim Dreyfuss }
2031