// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When 6 more RBDs are used - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of num_queues * (8 - 2) RBDs - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs, and there is no guarantee when the
 *   other 6 RBDs will be supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver's 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
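
/*
 * Worked example of the allocator reserve described above (illustrative
 * numbers only; the 8 and 2 correspond to RX_CLAIM_REQ_ALLOC and
 * RX_POST_REQ_ALLOC as used in _iwl_pcie_rx_init() below):
 * with e.g. 4 Rx queues, the allocator keeps an initial pool of
 * num_queues * (8 - 2) = 4 * 6 = 24 RBDs. Each queue posts an allocation
 * request as soon as it has 2 used RBDs, but requests are only fulfilled
 * in batches of 8, so up to 6 RBDs per queue can be outstanding at any
 * time; the reserve covers exactly that worst case.
 */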

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (queue_size - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by the queue size and is well
	 * defined for negative dividends.
	 */
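	/*
	 * Worked example (illustrative, queue size 256):
	 *   read = 10, write = 7   -> (10 - 7 - 1) & 255 = 2 free slots
	 *   read = 7,  write = 250 -> (7 - 250 - 1) & 255 = (-244) & 255 = 12
	 * i.e. the mask handles the wrap-around without a branch.
	 */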
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
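	/*
	 * The returned value is the DMA address in 256-byte units, e.g.
	 * (illustrative) dma_addr 0x3A2B5C00 >> 8 = 0x003A2B5C.
	 */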
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->trans_cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

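	/*
	 * The device is only told about new buffers in multiples of 8,
	 * e.g. (illustrative) write = 13 -> write_actual = 8; the
	 * remaining RBDs become visible once 8 more have accumulated.
	 */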
	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->trans_cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

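		/*
		 * The legacy format packs the DMA address and the virtual
		 * buffer ID into a single 64-bit word; the vid occupies low
		 * address bits that are guaranteed clear, see the WARN_ON
		 * on supported_dma_mask in iwl_pcie_rxmq_restock().
		 */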
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* some low bits are expected to be unset (depending on hw) */
		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

354e0e168dcSGregory Greenman /*
355e705c121SKalle Valo  * iwl_pcie_rx_alloc_page - allocates and returns a page.
356e705c121SKalle Valo  *
357e705c121SKalle Valo  */
358e705c121SKalle Valo static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
359cfdc20efSJohannes Berg 					   u32 *offset, gfp_t priority)
360e705c121SKalle Valo {
361e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
362cfdc20efSJohannes Berg 	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
363cfdc20efSJohannes Berg 	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
364e705c121SKalle Valo 	struct page *page;
365e705c121SKalle Valo 	gfp_t gfp_mask = priority;
366e705c121SKalle Valo 
367e705c121SKalle Valo 	if (trans_pcie->rx_page_order > 0)
368e705c121SKalle Valo 		gfp_mask |= __GFP_COMP;
369e705c121SKalle Valo 
370cfdc20efSJohannes Berg 	if (trans_pcie->alloc_page) {
371cfdc20efSJohannes Berg 		spin_lock_bh(&trans_pcie->alloc_page_lock);
372cfdc20efSJohannes Berg 		/* recheck */
373cfdc20efSJohannes Berg 		if (trans_pcie->alloc_page) {
374cfdc20efSJohannes Berg 			*offset = trans_pcie->alloc_page_used;
375cfdc20efSJohannes Berg 			page = trans_pcie->alloc_page;
376cfdc20efSJohannes Berg 			trans_pcie->alloc_page_used += rbsize;
377cfdc20efSJohannes Berg 			if (trans_pcie->alloc_page_used >= allocsize)
378cfdc20efSJohannes Berg 				trans_pcie->alloc_page = NULL;
379cfdc20efSJohannes Berg 			else
380cfdc20efSJohannes Berg 				get_page(page);
381cfdc20efSJohannes Berg 			spin_unlock_bh(&trans_pcie->alloc_page_lock);
382cfdc20efSJohannes Berg 			return page;
383cfdc20efSJohannes Berg 		}
384cfdc20efSJohannes Berg 		spin_unlock_bh(&trans_pcie->alloc_page_lock);
385cfdc20efSJohannes Berg 	}
386cfdc20efSJohannes Berg 
387e705c121SKalle Valo 	/* Alloc a new receive buffer */
388e705c121SKalle Valo 	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
389e705c121SKalle Valo 	if (!page) {
390e705c121SKalle Valo 		if (net_ratelimit())
391e705c121SKalle Valo 			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
392e705c121SKalle Valo 				       trans_pcie->rx_page_order);
39378485054SSara Sharon 		/*
39478485054SSara Sharon 		 * Issue an error if we don't have enough pre-allocated
39578485054SSara Sharon 		  * buffers.
3961da3823dSLuca Coelho 		 */
39778485054SSara Sharon 		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
398e705c121SKalle Valo 			IWL_CRIT(trans,
39978485054SSara Sharon 				 "Failed to alloc_pages\n");
400e705c121SKalle Valo 		return NULL;
401e705c121SKalle Valo 	}
402cfdc20efSJohannes Berg 
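	/*
	 * If at least two RBs fit in one page, keep the remainder of the
	 * page around for the next caller, e.g. (illustrative) a 2K RB
	 * size with 4K pages yields two buffers per page.
	 */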
	if (2 * rbsize <= allocsize) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		if (!trans_pcie->alloc_page) {
			get_page(page);
			trans_pcie->alloc_page = page;
			trans_pcie->alloc_page_used = rbsize;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	*offset = 0;
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		unsigned int offset;

		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		rxb->offset = offset;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and the initial pool covers
			 * any possible gap between the time the page is
			 * allocated and the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
						      gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page,
						     rxb->offset,
						     trans_pcie->rx_buf_bytes,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

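/*
 * Size of one descriptor in the free-table ring: 16 bytes
 * (struct iwl_rx_transfer_desc, see the BUILD_BUG_ON in
 * iwl_pcie_restock_bd()) when RX transfer descriptors are used,
 * 8 bytes (a plain DMA address) on other multi-queue devices, and
 * 4 bytes (a DMA address shifted right by 8) on single-queue devices.
 */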
static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
			      sizeof(struct iwl_rb_status);

	spin_lock_init(&rxq->lock);
	if (trans->trans_cfg->mq_rx_supported)
		rxq->queue_size = trans->cfg->num_rbds;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->trans_cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->tr_tail_dma, GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->cr_tail_dma, GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;
	size_t rb_stts_size = trans->trans_cfg->device_family >=
				IWL_DEVICE_FAMILY_AX210 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
				      sizeof(trans_pcie->rx_pool[0]),
				      GFP_KERNEL);
	trans_pcie->global_table =
		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
			sizeof(trans_pcie->global_table[0]),
			GFP_KERNEL);
	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
	    !trans_pcie->global_table) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues contiguously (HW requirement).
	 */
	trans_pcie->base_rb_stts =
			dma_alloc_coherent(trans->dev,
					   rb_stts_size * trans->num_rx_queues,
					   &trans_pcie->base_rb_stts_dma,
					   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rx_pool);
	kfree(trans_pcie->global_table);
	kfree(trans_pcie->rxq);

	return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to host
	 * Rx buffer size 4k, 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4k, 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->trans_cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock(&rxq->lock);
		/*
		 * Set read/write pointers to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
105578485054SSara Sharon 		rxq->read = 0;
105678485054SSara Sharon 		rxq->write = 0;
105778485054SSara Sharon 		rxq->write_actual = 0;
10583681021fSJohannes Berg 		memset(rxq->rb_stts, 0,
10593681021fSJohannes Berg 		       (trans->trans_cfg->device_family >=
10603681021fSJohannes Berg 			IWL_DEVICE_FAMILY_AX210) ?
10610307c839SGolan Ben Ami 		       sizeof(__le16) : sizeof(struct iwl_rb_status));
106278485054SSara Sharon 
106378485054SSara Sharon 		iwl_pcie_rx_init_rxb_lists(rxq);
106478485054SSara Sharon 
1065bce97731SSara Sharon 		if (!rxq->napi.poll)
1066bce97731SSara Sharon 			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1067bce97731SSara Sharon 				       iwl_pcie_dummy_napi_poll, 64);
1068bce97731SSara Sharon 
1069e705c121SKalle Valo 		spin_unlock(&rxq->lock);
107078485054SSara Sharon 	}
107178485054SSara Sharon 
	/* move the pool to the ownership of the default queue and the allocator */
1073286ca8ebSLuca Coelho 	queue_size = trans->trans_cfg->mq_rx_supported ?
1074c042f0c7SJohannes Berg 			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
107596a6497bSSara Sharon 	allocator_pool_size = trans->num_rx_queues *
107696a6497bSSara Sharon 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
10777b542436SSara Sharon 	num_alloc = queue_size + allocator_pool_size;
1078c042f0c7SJohannes Berg 
10797b542436SSara Sharon 	for (i = 0; i < num_alloc; i++) {
108096a6497bSSara Sharon 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
108196a6497bSSara Sharon 
108296a6497bSSara Sharon 		if (i < allocator_pool_size)
108396a6497bSSara Sharon 			list_add(&rxb->list, &rba->rbd_empty);
108496a6497bSSara Sharon 		else
108596a6497bSSara Sharon 			list_add(&rxb->list, &def_rxq->rx_used);
108696a6497bSSara Sharon 		trans_pcie->global_table[i] = rxb;
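		/* vid is 1-based; 0 is reserved to mean "no buffer" */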
1087e25d65f2SSara Sharon 		rxb->vid = (u16)(i + 1);
1088b1753c62SSara Sharon 		rxb->invalid = true;
108996a6497bSSara Sharon 	}
109078485054SSara Sharon 
109178485054SSara Sharon 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
10922047fa54SSara Sharon 
1093eda50cdeSSara Sharon 	return 0;
1094eda50cdeSSara Sharon }
1095eda50cdeSSara Sharon 
1096eda50cdeSSara Sharon int iwl_pcie_rx_init(struct iwl_trans *trans)
1097eda50cdeSSara Sharon {
1098eda50cdeSSara Sharon 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1099eda50cdeSSara Sharon 	int ret = _iwl_pcie_rx_init(trans);
1100eda50cdeSSara Sharon 
1101eda50cdeSSara Sharon 	if (ret)
1102eda50cdeSSara Sharon 		return ret;
1103eda50cdeSSara Sharon 
1104286ca8ebSLuca Coelho 	if (trans->trans_cfg->mq_rx_supported)
1105bce97731SSara Sharon 		iwl_pcie_rx_mq_hw_init(trans);
11062047fa54SSara Sharon 	else
1107eda50cdeSSara Sharon 		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
11082047fa54SSara Sharon 
1109eda50cdeSSara Sharon 	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
111078485054SSara Sharon 
1111eda50cdeSSara Sharon 	spin_lock(&trans_pcie->rxq->lock);
1112eda50cdeSSara Sharon 	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1113eda50cdeSSara Sharon 	spin_unlock(&trans_pcie->rxq->lock);
1114e705c121SKalle Valo 
1115e705c121SKalle Valo 	return 0;
1116e705c121SKalle Valo }
1117e705c121SKalle Valo 
1118eda50cdeSSara Sharon int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1119eda50cdeSSara Sharon {
1120e506b481SSara Sharon 	/* Set interrupt coalescing timer to default (2048 usecs) */
1121e506b481SSara Sharon 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1122e506b481SSara Sharon 
	/*
	 * We don't configure the RFH here; restocking will be done at
	 * "alive", after the firmware has configured the RFH.
	 */
1127eda50cdeSSara Sharon 	return _iwl_pcie_rx_init(trans);
1128eda50cdeSSara Sharon }
1129eda50cdeSSara Sharon 
1130e705c121SKalle Valo void iwl_pcie_rx_free(struct iwl_trans *trans)
1131e705c121SKalle Valo {
1132e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1133e705c121SKalle Valo 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
113478485054SSara Sharon 	int i;
1135286ca8ebSLuca Coelho 	size_t rb_stts_size = trans->trans_cfg->device_family >=
11363681021fSJohannes Berg 				IWL_DEVICE_FAMILY_AX210 ?
11376cc6ba3aSTriebitz 			      sizeof(__le16) : sizeof(struct iwl_rb_status);
1138e705c121SKalle Valo 
113978485054SSara Sharon 	/*
114078485054SSara Sharon 	 * if rxq is NULL, it means that nothing has been allocated,
114178485054SSara Sharon 	 * exit now
114278485054SSara Sharon 	 */
114378485054SSara Sharon 	if (!trans_pcie->rxq) {
1144e705c121SKalle Valo 		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1145e705c121SKalle Valo 		return;
1146e705c121SKalle Valo 	}
1147e705c121SKalle Valo 
1148e705c121SKalle Valo 	cancel_work_sync(&rba->rx_alloc);
1149e705c121SKalle Valo 
115078485054SSara Sharon 	iwl_pcie_free_rbs_pool(trans);
1151e705c121SKalle Valo 
11526cc6ba3aSTriebitz 	if (trans_pcie->base_rb_stts) {
11536cc6ba3aSTriebitz 		dma_free_coherent(trans->dev,
11546cc6ba3aSTriebitz 				  rb_stts_size * trans->num_rx_queues,
11556cc6ba3aSTriebitz 				  trans_pcie->base_rb_stts,
11566cc6ba3aSTriebitz 				  trans_pcie->base_rb_stts_dma);
11576cc6ba3aSTriebitz 		trans_pcie->base_rb_stts = NULL;
11586cc6ba3aSTriebitz 		trans_pcie->base_rb_stts_dma = 0;
11596cc6ba3aSTriebitz 	}
11606cc6ba3aSTriebitz 
116178485054SSara Sharon 	for (i = 0; i < trans->num_rx_queues; i++) {
116278485054SSara Sharon 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
116378485054SSara Sharon 
11641b493e30SGolan Ben Ami 		iwl_pcie_free_rxq_dma(trans, rxq);
1165bce97731SSara Sharon 
1166bce97731SSara Sharon 		if (rxq->napi.poll)
1167bce97731SSara Sharon 			netif_napi_del(&rxq->napi);
116896a6497bSSara Sharon 	}
1169c042f0c7SJohannes Berg 	kfree(trans_pcie->rx_pool);
1170c042f0c7SJohannes Berg 	kfree(trans_pcie->global_table);
117178485054SSara Sharon 	kfree(trans_pcie->rxq);
1172cfdc20efSJohannes Berg 
1173cfdc20efSJohannes Berg 	if (trans_pcie->alloc_page)
1174cfdc20efSJohannes Berg 		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1175e705c121SKalle Valo }
1176e705c121SKalle Valo 
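/*
 * iwl_pcie_rx_move_to_allocator - move the queue's used RBDs to the allocator
 *
 * Splices the queue's rx_used list onto the allocator's rbd_empty list under
 * the allocator lock, so the allocator work can later attach fresh pages to
 * those RBDs.
 */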
1177868a1e86SShaul Triebitz static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1178868a1e86SShaul Triebitz 					  struct iwl_rb_allocator *rba)
1179868a1e86SShaul Triebitz {
1180868a1e86SShaul Triebitz 	spin_lock(&rba->lock);
1181868a1e86SShaul Triebitz 	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1182868a1e86SShaul Triebitz 	spin_unlock(&rba->lock);
1183868a1e86SShaul Triebitz }
1184868a1e86SShaul Triebitz 
/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs, a request for allocation is posted.
 */
1191e705c121SKalle Valo static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1192e705c121SKalle Valo 				  struct iwl_rx_mem_buffer *rxb,
1193e705c121SKalle Valo 				  struct iwl_rxq *rxq, bool emergency)
1194e705c121SKalle Valo {
1195e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1196e705c121SKalle Valo 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1197e705c121SKalle Valo 
	/* Move the RBD to the used list; it will be moved to the allocator
	 * in batches before claiming or posting a request. */
1200e705c121SKalle Valo 	list_add_tail(&rxb->list, &rxq->rx_used);
1201e705c121SKalle Valo 
1202e705c121SKalle Valo 	if (unlikely(emergency))
1203e705c121SKalle Valo 		return;
1204e705c121SKalle Valo 
1205e705c121SKalle Valo 	/* Count the allocator owned RBDs */
1206e705c121SKalle Valo 	rxq->used_count++;
1207e705c121SKalle Valo 
	/* If we have RX_POST_REQ_ALLOC newly released rx buffers,
	 * issue a request for the allocator. The modulo by
	 * RX_CLAIM_REQ_ALLOC covers the case where we failed to claim
	 * RX_CLAIM_REQ_ALLOC buffers earlier but still need to post
	 * another request.
	 */
1213e705c121SKalle Valo 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator's ownership.
		 * The allocator has another 6 from the pool for the
		 * request completion. */
1216868a1e86SShaul Triebitz 		iwl_pcie_rx_move_to_allocator(rxq, rba);
1217e705c121SKalle Valo 
1218e705c121SKalle Valo 		atomic_inc(&rba->req_pending);
1219e705c121SKalle Valo 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1220e705c121SKalle Valo 	}
1221e705c121SKalle Valo }
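
/*
 * A worked example of the batching above, assuming the usual values of
 * RX_POST_REQ_ALLOC == 2 and RX_CLAIM_REQ_ALLOC == 8: whenever
 * used_count mod 8 reaches 2 (i.e. at 2, 10, 18, ...) a request is
 * posted, so one allocation request covers every 8 reused RBDs; the
 * modulo also catches the case where a previous claim of 8 failed but
 * another request still has to be posted.
 */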
1222e705c121SKalle Valo 
1223e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
122478485054SSara Sharon 				struct iwl_rxq *rxq,
1225e705c121SKalle Valo 				struct iwl_rx_mem_buffer *rxb,
12267891965dSSara Sharon 				bool emergency,
12277891965dSSara Sharon 				int i)
1228e705c121SKalle Valo {
1229e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
12304f4822b7SMordechay Goodstein 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1231e705c121SKalle Valo 	bool page_stolen = false;
123280084e35SJohannes Berg 	int max_len = trans_pcie->rx_buf_bytes;
1233e705c121SKalle Valo 	u32 offset = 0;
1234e705c121SKalle Valo 
1235e705c121SKalle Valo 	if (WARN_ON(!rxb))
1236e705c121SKalle Valo 		return;
1237e705c121SKalle Valo 
1238e705c121SKalle Valo 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1239e705c121SKalle Valo 
1240e705c121SKalle Valo 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1241e705c121SKalle Valo 		struct iwl_rx_packet *pkt;
1242e705c121SKalle Valo 		u16 sequence;
1243e705c121SKalle Valo 		bool reclaim;
1244e705c121SKalle Valo 		int index, cmd_index, len;
1245e705c121SKalle Valo 		struct iwl_rx_cmd_buffer rxcb = {
1246cfdc20efSJohannes Berg 			._offset = rxb->offset + offset,
1247e705c121SKalle Valo 			._rx_page_order = trans_pcie->rx_page_order,
1248e705c121SKalle Valo 			._page = rxb->page,
1249e705c121SKalle Valo 			._page_stolen = false,
1250e705c121SKalle Valo 			.truesize = max_len,
1251e705c121SKalle Valo 		};
1252e705c121SKalle Valo 
1253e705c121SKalle Valo 		pkt = rxb_addr(&rxcb);
1254e705c121SKalle Valo 
12553bfdee76SJohannes Berg 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
12563bfdee76SJohannes Berg 			IWL_DEBUG_RX(trans,
12573bfdee76SJohannes Berg 				     "Q %d: RB end marker at offset %d\n",
12583bfdee76SJohannes Berg 				     rxq->id, offset);
1259e705c121SKalle Valo 			break;
12603bfdee76SJohannes Berg 		}
1261e705c121SKalle Valo 
1262a395058eSJohannes Berg 		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1263a395058eSJohannes Berg 			FH_RSCSR_RXQ_POS != rxq->id,
1264a395058eSJohannes Berg 		     "frame on invalid queue - is on %d and indicates %d\n",
1265a395058eSJohannes Berg 		     rxq->id,
1266a395058eSJohannes Berg 		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1267a395058eSJohannes Berg 			FH_RSCSR_RXQ_POS);
1268ab2e696bSSara Sharon 
1269e705c121SKalle Valo 		IWL_DEBUG_RX(trans,
12703bfdee76SJohannes Berg 			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
12713bfdee76SJohannes Berg 			     rxq->id, offset,
127239bdb17eSSharon Dvir 			     iwl_get_cmd_string(trans,
127339bdb17eSSharon Dvir 						iwl_cmd_id(pkt->hdr.cmd,
127439bdb17eSSharon Dvir 							   pkt->hdr.group_id,
127539bdb17eSSharon Dvir 							   0)),
127635177c99SSara Sharon 			     pkt->hdr.group_id, pkt->hdr.cmd,
127735177c99SSara Sharon 			     le16_to_cpu(pkt->hdr.sequence));
1278e705c121SKalle Valo 
1279e705c121SKalle Valo 		len = iwl_rx_packet_len(pkt);
1280e705c121SKalle Valo 		len += sizeof(u32); /* account for status word */
1281df72138dSJohannes Berg 
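		/* advance past this frame; frames within an RB are laid out
		 * at FH_RSCSR_FRAME_ALIGN boundaries (64 bytes at the time
		 * of writing)
		 */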
1282df72138dSJohannes Berg 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1283df72138dSJohannes Berg 
1284df72138dSJohannes Berg 		/* check that what the device tells us made sense */
1285df72138dSJohannes Berg 		if (offset > max_len)
1286df72138dSJohannes Berg 			break;
1287df72138dSJohannes Berg 
1288e705c121SKalle Valo 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1289e705c121SKalle Valo 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1290e705c121SKalle Valo 
1291e705c121SKalle Valo 		/* Reclaim a command buffer only if this packet is a response
1292e705c121SKalle Valo 		 *   to a (driver-originated) command.
1293e705c121SKalle Valo 		 * If the packet (e.g. Rx frame) originated from uCode,
1294e705c121SKalle Valo 		 *   there is no command buffer to reclaim.
		 * uCode should set the SEQ_RX_FRAME bit if uCode-originated,
1296e705c121SKalle Valo 		 *   but apparently a few don't get set; catch them here. */
1297e705c121SKalle Valo 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1298d8a130b0SJohannes Berg 		if (reclaim && !pkt->hdr.group_id) {
1299e705c121SKalle Valo 			int i;
1300e705c121SKalle Valo 
1301e705c121SKalle Valo 			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1302e705c121SKalle Valo 				if (trans_pcie->no_reclaim_cmds[i] ==
1303e705c121SKalle Valo 							pkt->hdr.cmd) {
1304e705c121SKalle Valo 					reclaim = false;
1305e705c121SKalle Valo 					break;
1306e705c121SKalle Valo 				}
1307e705c121SKalle Valo 			}
1308e705c121SKalle Valo 		}
1309e705c121SKalle Valo 
1310e705c121SKalle Valo 		sequence = le16_to_cpu(pkt->hdr.sequence);
1311e705c121SKalle Valo 		index = SEQ_TO_INDEX(sequence);
13120cd1ad2dSMordechay Goodstein 		cmd_index = iwl_txq_get_cmd_index(txq, index);
1313e705c121SKalle Valo 
13149416560eSGolan Ben Ami 		if (rxq->id == trans_pcie->def_rx_queue)
1315bce97731SSara Sharon 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1316bce97731SSara Sharon 				       &rxcb);
1317bce97731SSara Sharon 		else
1318bce97731SSara Sharon 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1319bce97731SSara Sharon 					   &rxcb, rxq->id);
1320e705c121SKalle Valo 
1321e705c121SKalle Valo 		if (reclaim) {
1322453431a5SWaiman Long 			kfree_sensitive(txq->entries[cmd_index].free_buf);
1323e705c121SKalle Valo 			txq->entries[cmd_index].free_buf = NULL;
1324e705c121SKalle Valo 		}
1325e705c121SKalle Valo 
1326e705c121SKalle Valo 		/*
1327e705c121SKalle Valo 		 * After here, we should always check rxcb._page_stolen,
1328e705c121SKalle Valo 		 * if it is true then one of the handlers took the page.
1329e705c121SKalle Valo 		 */
1330e705c121SKalle Valo 
1331e705c121SKalle Valo 		if (reclaim) {
1332e705c121SKalle Valo 			/* Invoke any callbacks, transfer the buffer to caller,
1333e705c121SKalle Valo 			 * and fire off the (possibly) blocking
1334e705c121SKalle Valo 			 * iwl_trans_send_cmd()
1335e705c121SKalle Valo 			 * as we reclaim the driver command queue */
1336e705c121SKalle Valo 			if (!rxcb._page_stolen)
1337e705c121SKalle Valo 				iwl_pcie_hcmd_complete(trans, &rxcb);
1338e705c121SKalle Valo 			else
1339e705c121SKalle Valo 				IWL_WARN(trans, "Claim null rxb?\n");
1340e705c121SKalle Valo 		}
1341e705c121SKalle Valo 
1342e705c121SKalle Valo 		page_stolen |= rxcb._page_stolen;
13433681021fSJohannes Berg 		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
13440307c839SGolan Ben Ami 			break;
1345e705c121SKalle Valo 	}
1346e705c121SKalle Valo 
1347e705c121SKalle Valo 	/* page was stolen from us -- free our reference */
1348e705c121SKalle Valo 	if (page_stolen) {
1349e705c121SKalle Valo 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1350e705c121SKalle Valo 		rxb->page = NULL;
1351e705c121SKalle Valo 	}
1352e705c121SKalle Valo 
1353e705c121SKalle Valo 	/* Reuse the page if possible. For notification packets and
1354e705c121SKalle Valo 	 * SKBs that fail to Rx correctly, add them back into the
1355e705c121SKalle Valo 	 * rx_free list for reuse later. */
1356e705c121SKalle Valo 	if (rxb->page != NULL) {
1357e705c121SKalle Valo 		rxb->page_dma =
1358cfdc20efSJohannes Berg 			dma_map_page(trans->dev, rxb->page, rxb->offset,
135980084e35SJohannes Berg 				     trans_pcie->rx_buf_bytes,
1360e705c121SKalle Valo 				     DMA_FROM_DEVICE);
1361e705c121SKalle Valo 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1362e705c121SKalle Valo 			/*
1363e705c121SKalle Valo 			 * free the page(s) as well to not break
1364e705c121SKalle Valo 			 * the invariant that the items on the used
1365e705c121SKalle Valo 			 * list have no page(s)
1366e705c121SKalle Valo 			 */
1367e705c121SKalle Valo 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1368e705c121SKalle Valo 			rxb->page = NULL;
1369e705c121SKalle Valo 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1370e705c121SKalle Valo 		} else {
1371e705c121SKalle Valo 			list_add_tail(&rxb->list, &rxq->rx_free);
1372e705c121SKalle Valo 			rxq->free_count++;
1373e705c121SKalle Valo 		}
1374e705c121SKalle Valo 	} else
1375e705c121SKalle Valo 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1376e705c121SKalle Valo }
1377e705c121SKalle Valo 
13781b4bbe8bSSara Sharon static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1379b1c860f6SJohannes Berg 						  struct iwl_rxq *rxq, int i,
1380b1c860f6SJohannes Berg 						  bool *join)
13811b4bbe8bSSara Sharon {
13821b4bbe8bSSara Sharon 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
13831b4bbe8bSSara Sharon 	struct iwl_rx_mem_buffer *rxb;
13841b4bbe8bSSara Sharon 	u16 vid;
13851b4bbe8bSSara Sharon 
1386f826faaaSJohannes Berg 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1387f826faaaSJohannes Berg 
1388286ca8ebSLuca Coelho 	if (!trans->trans_cfg->mq_rx_supported) {
13891b4bbe8bSSara Sharon 		rxb = rxq->queue[i];
13901b4bbe8bSSara Sharon 		rxq->queue[i] = NULL;
13911b4bbe8bSSara Sharon 		return rxb;
13921b4bbe8bSSara Sharon 	}
13931b4bbe8bSSara Sharon 
1394b1c860f6SJohannes Berg 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1395c042f0c7SJohannes Berg 		vid = le16_to_cpu(rxq->cd[i].rbid);
1396b1c860f6SJohannes Berg 		*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1397b1c860f6SJohannes Berg 	} else {
1398c042f0c7SJohannes Berg 		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
1399b1c860f6SJohannes Berg 	}
14001b4bbe8bSSara Sharon 
1401c042f0c7SJohannes Berg 	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
14021b4bbe8bSSara Sharon 		goto out_err;
14031b4bbe8bSSara Sharon 
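	/* vid is 1-based (see _iwl_pcie_rx_init()), hence the - 1 here */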
14041b4bbe8bSSara Sharon 	rxb = trans_pcie->global_table[vid - 1];
14051b4bbe8bSSara Sharon 	if (rxb->invalid)
14061b4bbe8bSSara Sharon 		goto out_err;
14071b4bbe8bSSara Sharon 
140885d78bb1SSara Sharon 	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
140985d78bb1SSara Sharon 
14101b4bbe8bSSara Sharon 	rxb->invalid = true;
14111b4bbe8bSSara Sharon 
14121b4bbe8bSSara Sharon 	return rxb;
14131b4bbe8bSSara Sharon 
14141b4bbe8bSSara Sharon out_err:
14151b4bbe8bSSara Sharon 	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
14161b4bbe8bSSara Sharon 	iwl_force_nmi(trans);
14171b4bbe8bSSara Sharon 	return NULL;
14181b4bbe8bSSara Sharon }
14191b4bbe8bSSara Sharon 
1420e705c121SKalle Valo /*
1421e705c121SKalle Valo  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1422e705c121SKalle Valo  */
14232e5d4a8fSHaim Dreyfuss static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1424e705c121SKalle Valo {
1425e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1426b167191eSAlexander Lobakin 	struct napi_struct *napi;
142730f24eabSJohannes Berg 	struct iwl_rxq *rxq;
1428d56daea4SSara Sharon 	u32 r, i, count = 0;
1429e705c121SKalle Valo 	bool emergency = false;
1430e705c121SKalle Valo 
143130f24eabSJohannes Berg 	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
143230f24eabSJohannes Berg 		return;
143330f24eabSJohannes Berg 
143430f24eabSJohannes Berg 	rxq = &trans_pcie->rxq[queue];
143530f24eabSJohannes Berg 
1436e705c121SKalle Valo restart:
1437e705c121SKalle Valo 	spin_lock(&rxq->lock);
1438e705c121SKalle Valo 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1439e705c121SKalle Valo 	 * buffer that the driver may process (last buffer filled by ucode). */
14400307c839SGolan Ben Ami 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1441e705c121SKalle Valo 	i = rxq->read;
1442e705c121SKalle Valo 
14435eae443eSSara Sharon 	/* W/A 9000 device step A0 wrap-around bug */
14445eae443eSSara Sharon 	r &= (rxq->queue_size - 1);
14455eae443eSSara Sharon 
1446e705c121SKalle Valo 	/* Rx interrupt, but nothing sent from uCode */
1447e705c121SKalle Valo 	if (i == r)
14485eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1449e705c121SKalle Valo 
1450e705c121SKalle Valo 	while (i != r) {
1451868a1e86SShaul Triebitz 		struct iwl_rb_allocator *rba = &trans_pcie->rba;
1452e705c121SKalle Valo 		struct iwl_rx_mem_buffer *rxb;
1453868a1e86SShaul Triebitz 		/* number of RBDs still waiting for page allocation */
1454868a1e86SShaul Triebitz 		u32 rb_pending_alloc =
1455868a1e86SShaul Triebitz 			atomic_read(&trans_pcie->rba.req_pending) *
1456868a1e86SShaul Triebitz 			RX_CLAIM_REQ_ALLOC;
1457b1c860f6SJohannes Berg 		bool join = false;
1458e705c121SKalle Valo 
1459868a1e86SShaul Triebitz 		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1460868a1e86SShaul Triebitz 			     !emergency)) {
1461868a1e86SShaul Triebitz 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1462e705c121SKalle Valo 			emergency = true;
14636dcdd165SSara Sharon 			IWL_DEBUG_TPT(trans,
14646dcdd165SSara Sharon 				      "RX path is in emergency. Pending allocations %d\n",
14656dcdd165SSara Sharon 				      rb_pending_alloc);
1466868a1e86SShaul Triebitz 		}
1467e705c121SKalle Valo 
146885d78bb1SSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
146985d78bb1SSara Sharon 
1470b1c860f6SJohannes Berg 		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
14711b4bbe8bSSara Sharon 		if (!rxb)
14725eae443eSSara Sharon 			goto out;
1473e705c121SKalle Valo 
1474b1c860f6SJohannes Berg 		if (unlikely(join || rxq->next_rb_is_fragment)) {
1475b1c860f6SJohannes Berg 			rxq->next_rb_is_fragment = join;
1476b1c860f6SJohannes Berg 			/*
1477b1c860f6SJohannes Berg 			 * We can only get a multi-RB in the following cases:
1478b1c860f6SJohannes Berg 			 *  - firmware issue, sending a too big notification
1479b1c860f6SJohannes Berg 			 *  - sniffer mode with a large A-MSDU
1480b1c860f6SJohannes Berg 			 *  - large MTU frames (>2k)
1481b1c860f6SJohannes Berg 			 * since the multi-RB functionality is limited to newer
1482b1c860f6SJohannes Berg 			 * hardware that cannot put multiple entries into a
1483b1c860f6SJohannes Berg 			 * single RB.
1484b1c860f6SJohannes Berg 			 *
1485b1c860f6SJohannes Berg 			 * Right now, the higher layers aren't set up to deal
1486b1c860f6SJohannes Berg 			 * with that, so discard all of these.
1487b1c860f6SJohannes Berg 			 */
1488b1c860f6SJohannes Berg 			list_add_tail(&rxb->list, &rxq->rx_free);
1489b1c860f6SJohannes Berg 			rxq->free_count++;
1490b1c860f6SJohannes Berg 		} else {
14917891965dSSara Sharon 			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1492b1c860f6SJohannes Berg 		}
1493e705c121SKalle Valo 
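		/*
		 * Advance the ring index with a mask rather than a modulo;
		 * this relies on queue_size being a power of two, so the
		 * AND wraps the index back to zero at the end of the ring.
		 */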
149496a6497bSSara Sharon 		i = (i + 1) & (rxq->queue_size - 1);
1495e705c121SKalle Valo 
		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers, try to
		 * claim the pre-allocated buffers from the allocator. If the
		 * allocator is not ready, we will try to claim them next
		 * time. There is no need to reschedule the work - the
		 * allocator exits only on success.
		 */
1503d56daea4SSara Sharon 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1504d56daea4SSara Sharon 			iwl_pcie_rx_allocator_get(trans, rxq);
1505e705c121SKalle Valo 
1506d56daea4SSara Sharon 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1507d56daea4SSara Sharon 			/* Add the remaining empty RBDs for allocator use */
1508868a1e86SShaul Triebitz 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1509d56daea4SSara Sharon 		} else if (emergency) {
1510e705c121SKalle Valo 			count++;
1511e705c121SKalle Valo 			if (count == 8) {
1512e705c121SKalle Valo 				count = 0;
15136dcdd165SSara Sharon 				if (rb_pending_alloc < rxq->queue_size / 3) {
15146dcdd165SSara Sharon 					IWL_DEBUG_TPT(trans,
15156dcdd165SSara Sharon 						      "RX path exited emergency. Pending allocations %d\n",
15166dcdd165SSara Sharon 						      rb_pending_alloc);
1517e705c121SKalle Valo 					emergency = false;
15186dcdd165SSara Sharon 				}
1519e0e168dcSGregory Greenman 
1520e705c121SKalle Valo 				rxq->read = i;
1521e705c121SKalle Valo 				spin_unlock(&rxq->lock);
1522e0e168dcSGregory Greenman 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
152378485054SSara Sharon 				iwl_pcie_rxq_restock(trans, rxq);
1524e705c121SKalle Valo 				goto restart;
1525e705c121SKalle Valo 			}
1526e705c121SKalle Valo 		}
1527e0e168dcSGregory Greenman 	}
15285eae443eSSara Sharon out:
1529e705c121SKalle Valo 	/* Backtrack one entry */
1530e705c121SKalle Valo 	rxq->read = i;
15310307c839SGolan Ben Ami 	/* update cr tail with the rxq read pointer */
15323681021fSJohannes Berg 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
15330307c839SGolan Ben Ami 		*rxq->cr_tail = cpu_to_le16(r);
1534e705c121SKalle Valo 	spin_unlock(&rxq->lock);
1535e705c121SKalle Valo 
	/*
	 * Handle the case where in emergency mode some RBDs were left
	 * unallocated. Those RBDs are on the used list, but are not tracked
	 * by the queue's used_count, which counts allocator-owned RBDs.
	 * Unallocated emergency RBDs must be allocated on exit; otherwise,
	 * when this function is called again it may not be in emergency mode
	 * and they would be handed to the allocator with no tracking in the
	 * RBD allocator counters, which would lead to them never being
	 * claimed back by the queue.
	 * By allocating them here, they are now on the queue's free list and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
1548e705c121SKalle Valo 	if (unlikely(emergency && count))
154978485054SSara Sharon 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1550e705c121SKalle Valo 
1551b167191eSAlexander Lobakin 	napi = &rxq->napi;
1552b167191eSAlexander Lobakin 	if (napi->poll) {
1553c8079432SMaxim Mikityanskiy 		napi_gro_flush(napi, false);
1554c8079432SMaxim Mikityanskiy 
1555b167191eSAlexander Lobakin 		if (napi->rx_count) {
1556b167191eSAlexander Lobakin 			netif_receive_skb_list(&napi->rx_list);
1557b167191eSAlexander Lobakin 			INIT_LIST_HEAD(&napi->rx_list);
1558b167191eSAlexander Lobakin 			napi->rx_count = 0;
1559b167191eSAlexander Lobakin 		}
1560b167191eSAlexander Lobakin 	}
1561e0e168dcSGregory Greenman 
1562e0e168dcSGregory Greenman 	iwl_pcie_rxq_restock(trans, rxq);
1563e705c121SKalle Valo }
1564e705c121SKalle Valo 
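/*
 * entry->entry is this entry's index within the msix_entries[] array
 * embedded in iwl_trans_pcie, so stepping back by that index yields
 * &msix_entries[0], from which container_of() recovers the enclosing
 * iwl_trans_pcie.
 */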
15652e5d4a8fSHaim Dreyfuss static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
15662e5d4a8fSHaim Dreyfuss {
15672e5d4a8fSHaim Dreyfuss 	u8 queue = entry->entry;
15682e5d4a8fSHaim Dreyfuss 	struct msix_entry *entries = entry - queue;
15692e5d4a8fSHaim Dreyfuss 
15702e5d4a8fSHaim Dreyfuss 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
15712e5d4a8fSHaim Dreyfuss }
15722e5d4a8fSHaim Dreyfuss 
/*
 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queues only.
 */
15772e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
15782e5d4a8fSHaim Dreyfuss {
15792e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
15802e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
15812e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
15822e5d4a8fSHaim Dreyfuss 
1583c42ff65dSJohannes Berg 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1584c42ff65dSJohannes Berg 
15855eae443eSSara Sharon 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
15865eae443eSSara Sharon 		return IRQ_NONE;
15875eae443eSSara Sharon 
15882e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
15892e5d4a8fSHaim Dreyfuss 
15902e5d4a8fSHaim Dreyfuss 	local_bh_disable();
15912e5d4a8fSHaim Dreyfuss 	iwl_pcie_rx_handle(trans, entry->entry);
15922e5d4a8fSHaim Dreyfuss 	local_bh_enable();
15932e5d4a8fSHaim Dreyfuss 
15942e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
15952e5d4a8fSHaim Dreyfuss 
15962e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
15972e5d4a8fSHaim Dreyfuss 
15982e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
15992e5d4a8fSHaim Dreyfuss }
16002e5d4a8fSHaim Dreyfuss 
1601e705c121SKalle Valo /*
1602e705c121SKalle Valo  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1603e705c121SKalle Valo  */
1604e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1605e705c121SKalle Valo {
1606e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1607e705c121SKalle Valo 	int i;
1608e705c121SKalle Valo 
1609e705c121SKalle Valo 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1610e705c121SKalle Valo 	if (trans->cfg->internal_wimax_coex &&
1611e705c121SKalle Valo 	    !trans->cfg->apmg_not_supported &&
1612e705c121SKalle Valo 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1613e705c121SKalle Valo 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1614e705c121SKalle Valo 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1615e705c121SKalle Valo 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1616e705c121SKalle Valo 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1617e705c121SKalle Valo 		iwl_op_mode_wimax_active(trans->op_mode);
1618e705c121SKalle Valo 		wake_up(&trans_pcie->wait_command_queue);
1619e705c121SKalle Valo 		return;
1620e705c121SKalle Valo 	}
1621e705c121SKalle Valo 
1622286ca8ebSLuca Coelho 	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
16234f4822b7SMordechay Goodstein 		if (!trans->txqs.txq[i])
162413a3a390SSara Sharon 			continue;
16254f4822b7SMordechay Goodstein 		del_timer(&trans->txqs.txq[i]->stuck_timer);
162613a3a390SSara Sharon 	}
1627e705c121SKalle Valo 
16287d75f32eSEmmanuel Grumbach 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
16297d75f32eSEmmanuel Grumbach 	 * before we wake up the command caller, to ensure a proper cleanup. */
16307d75f32eSEmmanuel Grumbach 	iwl_trans_fw_error(trans);
16317d75f32eSEmmanuel Grumbach 
1632e705c121SKalle Valo 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1633e705c121SKalle Valo 	wake_up(&trans_pcie->wait_command_queue);
1634e705c121SKalle Valo }
1635e705c121SKalle Valo 
1636e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1637e705c121SKalle Valo {
1638e705c121SKalle Valo 	u32 inta;
1639e705c121SKalle Valo 
1640e705c121SKalle Valo 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1641e705c121SKalle Valo 
1642e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1643e705c121SKalle Valo 
1644e705c121SKalle Valo 	/* Discover which interrupts are active/pending */
1645e705c121SKalle Valo 	inta = iwl_read32(trans, CSR_INT);
1646e705c121SKalle Valo 
1647e705c121SKalle Valo 	/* the thread will service interrupts and re-enable them */
1648e705c121SKalle Valo 	return inta;
1649e705c121SKalle Valo }
1650e705c121SKalle Valo 
1651e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1652e705c121SKalle Valo #define ICT_SHIFT	12
1653e705c121SKalle Valo #define ICT_SIZE	(1 << ICT_SHIFT)
1654e705c121SKalle Valo #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
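/* i.e. a 4096-byte table holding ICT_COUNT == 1024 32-bit entries */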
1655e705c121SKalle Valo 
/* Interrupt handler using the ICT table. With this mechanism the driver
 * stops reading the INTA register to discover the device's interrupts,
 * since reading that register is expensive. Instead, the device writes
 * the interrupts into the ICT DRAM table, increments the index and fires
 * an interrupt to the driver. The driver ORs all ICT table entries from
 * the current index up to the first entry with value 0; the result is the
 * interrupt we need to service. The driver then sets the entries back to
 * 0 and updates the index.
 */
1664e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1665e705c121SKalle Valo {
1666e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1667e705c121SKalle Valo 	u32 inta;
1668e705c121SKalle Valo 	u32 val = 0;
1669e705c121SKalle Valo 	u32 read;
1670e705c121SKalle Valo 
1671e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1672e705c121SKalle Valo 
1673e705c121SKalle Valo 	/* Ignore interrupt if there's nothing in NIC to service.
1674e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1675e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC. */
1676e705c121SKalle Valo 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1677e705c121SKalle Valo 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1678e705c121SKalle Valo 	if (!read)
1679e705c121SKalle Valo 		return 0;
1680e705c121SKalle Valo 
1681e705c121SKalle Valo 	/*
1682e705c121SKalle Valo 	 * Collect all entries up to the first 0, starting from ict_index;
1683e705c121SKalle Valo 	 * note we already read at ict_index.
1684e705c121SKalle Valo 	 */
1685e705c121SKalle Valo 	do {
1686e705c121SKalle Valo 		val |= read;
1687e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1688e705c121SKalle Valo 				trans_pcie->ict_index, read);
1689e705c121SKalle Valo 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1690e705c121SKalle Valo 		trans_pcie->ict_index =
1691e705c121SKalle Valo 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1692e705c121SKalle Valo 
1693e705c121SKalle Valo 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1694e705c121SKalle Valo 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1695e705c121SKalle Valo 					   read);
1696e705c121SKalle Valo 	} while (read);
1697e705c121SKalle Valo 
1698e705c121SKalle Valo 	/* We should not get this value, just ignore it. */
1699e705c121SKalle Valo 	if (val == 0xffffffff)
1700e705c121SKalle Valo 		val = 0;
1701e705c121SKalle Valo 
	/*
	 * This is a workaround for a hardware bug. The bug may cause the
	 * Rx bit (bit 15 before shifting it to 31) to clear when using
	 * interrupt coalescing. Fortunately, bits 18 and 19 stay set when
	 * this happens, so we use them to decide on the real state of the
	 * Rx bit. In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
1709e705c121SKalle Valo 	if (val & 0xC0000)
1710e705c121SKalle Valo 		val |= 0x8000;
1711e705c121SKalle Valo 
1712e705c121SKalle Valo 	inta = (0xff & val) | ((0xff00 & val) << 16);
1713e705c121SKalle Valo 	return inta;
1714e705c121SKalle Valo }
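
/*
 * Worked example for the reassembly above: val == 0x8042 has bits 1 and 6
 * set in the low byte and bit 15 set in the high byte; (0xff & val) keeps
 * 0x42 and ((0xff00 & val) << 16) moves 0x8000 up to bit 31, yielding
 * inta == 0x80000042.
 */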
1715e705c121SKalle Valo 
1716fa4de7f7SJohannes Berg void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
17173a6e168bSJohannes Berg {
17183a6e168bSJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
17193a6e168bSJohannes Berg 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1720326477e4SJohannes Berg 	bool hw_rfkill, prev, report;
17213a6e168bSJohannes Berg 
17223a6e168bSJohannes Berg 	mutex_lock(&trans_pcie->mutex);
1723326477e4SJohannes Berg 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
17243a6e168bSJohannes Berg 	hw_rfkill = iwl_is_rfkill_set(trans);
1725326477e4SJohannes Berg 	if (hw_rfkill) {
1726326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1727326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_HW, &trans->status);
1728326477e4SJohannes Berg 	}
1729326477e4SJohannes Berg 	if (trans_pcie->opmode_down)
1730326477e4SJohannes Berg 		report = hw_rfkill;
1731326477e4SJohannes Berg 	else
1732326477e4SJohannes Berg 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
17333a6e168bSJohannes Berg 
17343a6e168bSJohannes Berg 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
17353a6e168bSJohannes Berg 		 hw_rfkill ? "disable radio" : "enable radio");
17363a6e168bSJohannes Berg 
17373a6e168bSJohannes Berg 	isr_stats->rfkill++;
17383a6e168bSJohannes Berg 
1739326477e4SJohannes Berg 	if (prev != report)
1740326477e4SJohannes Berg 		iwl_trans_pcie_rf_kill(trans, report);
17413a6e168bSJohannes Berg 	mutex_unlock(&trans_pcie->mutex);
17423a6e168bSJohannes Berg 
17433a6e168bSJohannes Berg 	if (hw_rfkill) {
17443a6e168bSJohannes Berg 		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
17453a6e168bSJohannes Berg 				       &trans->status))
17463a6e168bSJohannes Berg 			IWL_DEBUG_RF_KILL(trans,
17473a6e168bSJohannes Berg 					  "Rfkill while SYNC HCMD in flight\n");
17483a6e168bSJohannes Berg 		wake_up(&trans_pcie->wait_command_queue);
17493a6e168bSJohannes Berg 	} else {
1750326477e4SJohannes Berg 		clear_bit(STATUS_RFKILL_HW, &trans->status);
1751326477e4SJohannes Berg 		if (trans_pcie->opmode_down)
1752326477e4SJohannes Berg 			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
17533a6e168bSJohannes Berg 	}
17543a6e168bSJohannes Berg }
17553a6e168bSJohannes Berg 
1756e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1757e705c121SKalle Valo {
1758e705c121SKalle Valo 	struct iwl_trans *trans = dev_id;
1759e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1760e705c121SKalle Valo 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1761e705c121SKalle Valo 	u32 inta = 0;
1762e705c121SKalle Valo 	u32 handled = 0;
1763e705c121SKalle Valo 
1764e705c121SKalle Valo 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1765e705c121SKalle Valo 
1766e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1767e705c121SKalle Valo 
	/* If the DRAM interrupt table is not set yet,
	 * use the legacy interrupt path.
	 */
1771e705c121SKalle Valo 	if (likely(trans_pcie->use_ict))
1772e705c121SKalle Valo 		inta = iwl_pcie_int_cause_ict(trans);
1773e705c121SKalle Valo 	else
1774e705c121SKalle Valo 		inta = iwl_pcie_int_cause_non_ict(trans);
1775e705c121SKalle Valo 
1776e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1777e705c121SKalle Valo 		IWL_DEBUG_ISR(trans,
1778e705c121SKalle Valo 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1779e705c121SKalle Valo 			      inta, trans_pcie->inta_mask,
1780e705c121SKalle Valo 			      iwl_read32(trans, CSR_INT_MASK),
1781e705c121SKalle Valo 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1782e705c121SKalle Valo 		if (inta & (~trans_pcie->inta_mask))
1783e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1784e705c121SKalle Valo 				      "We got a masked interrupt (0x%08x)\n",
1785e705c121SKalle Valo 				      inta & (~trans_pcie->inta_mask));
1786e705c121SKalle Valo 	}
1787e705c121SKalle Valo 
1788e705c121SKalle Valo 	inta &= trans_pcie->inta_mask;
1789e705c121SKalle Valo 
1790e705c121SKalle Valo 	/*
1791e705c121SKalle Valo 	 * Ignore interrupt if there's nothing in NIC to service.
1792e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1793e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC.
1794e705c121SKalle Valo 	 */
1795e705c121SKalle Valo 	if (unlikely(!inta)) {
1796e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1797e705c121SKalle Valo 		/*
1798e705c121SKalle Valo 		 * Re-enable interrupts here since we don't
1799e705c121SKalle Valo 		 * have anything to service
1800e705c121SKalle Valo 		 */
1801e705c121SKalle Valo 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1802f16c3ebfSEmmanuel Grumbach 			_iwl_enable_interrupts(trans);
1803e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1804e705c121SKalle Valo 		lock_map_release(&trans->sync_cmd_lockdep_map);
1805e705c121SKalle Valo 		return IRQ_NONE;
1806e705c121SKalle Valo 	}
1807e705c121SKalle Valo 
1808e705c121SKalle Valo 	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1809e705c121SKalle Valo 		/*
1810e705c121SKalle Valo 		 * Hardware disappeared. It might have
1811e705c121SKalle Valo 		 * already raised an interrupt.
1812e705c121SKalle Valo 		 */
1813e705c121SKalle Valo 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1814e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1815e705c121SKalle Valo 		goto out;
1816e705c121SKalle Valo 	}
1817e705c121SKalle Valo 
1818e705c121SKalle Valo 	/* Ack/clear/reset pending uCode interrupts.
1819e705c121SKalle Valo 	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1820e705c121SKalle Valo 	 */
	/* There is a hardware bug in the interrupt mask function: some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
1829e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1830e705c121SKalle Valo 
1831e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR))
1832e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1833e705c121SKalle Valo 			      inta, iwl_read32(trans, CSR_INT_MASK));
1834e705c121SKalle Valo 
1835e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1836e705c121SKalle Valo 
1837e705c121SKalle Valo 	/* Now service all interrupt bits discovered above. */
1838e705c121SKalle Valo 	if (inta & CSR_INT_BIT_HW_ERR) {
1839e705c121SKalle Valo 		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1840e705c121SKalle Valo 
1841e705c121SKalle Valo 		/* Tell the device to stop sending interrupts */
1842e705c121SKalle Valo 		iwl_disable_interrupts(trans);
1843e705c121SKalle Valo 
1844e705c121SKalle Valo 		isr_stats->hw++;
1845e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1846e705c121SKalle Valo 
1847e705c121SKalle Valo 		handled |= CSR_INT_BIT_HW_ERR;
1848e705c121SKalle Valo 
1849e705c121SKalle Valo 		goto out;
1850e705c121SKalle Valo 	}
1851e705c121SKalle Valo 
1852e705c121SKalle Valo 	/* NIC fires this, but we don't use it, redundant with WAKEUP */
1853e705c121SKalle Valo 	if (inta & CSR_INT_BIT_SCD) {
		IWL_DEBUG_ISR(trans,
			      "Scheduler finished transmitting the frame/frames.\n");
1856e705c121SKalle Valo 		isr_stats->sch++;
1857e705c121SKalle Valo 	}
1858e705c121SKalle Valo 
1859e705c121SKalle Valo 	/* Alive notification via Rx interrupt will do the real work */
1860e705c121SKalle Valo 	if (inta & CSR_INT_BIT_ALIVE) {
1861e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1862e705c121SKalle Valo 		isr_stats->alive++;
1863286ca8ebSLuca Coelho 		if (trans->trans_cfg->gen2) {
			/*
			 * We can restock, since the firmware
			 * configured the RFH.
			 */
1868eda50cdeSSara Sharon 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1869eda50cdeSSara Sharon 		}
1870ed3e4c6dSEmmanuel Grumbach 
1871ed3e4c6dSEmmanuel Grumbach 		handled |= CSR_INT_BIT_ALIVE;
1872e705c121SKalle Valo 	}
1873e705c121SKalle Valo 
1874e705c121SKalle Valo 	/* Safely ignore these bits for debug checks below */
1875e705c121SKalle Valo 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1876e705c121SKalle Valo 
1877e705c121SKalle Valo 	/* HW RF KILL switch toggled */
1878e705c121SKalle Valo 	if (inta & CSR_INT_BIT_RF_KILL) {
18793a6e168bSJohannes Berg 		iwl_pcie_handle_rfkill_irq(trans);
1880e705c121SKalle Valo 		handled |= CSR_INT_BIT_RF_KILL;
1881e705c121SKalle Valo 	}
1882e705c121SKalle Valo 
1883e705c121SKalle Valo 	/* Chip got too hot and stopped itself */
1884e705c121SKalle Valo 	if (inta & CSR_INT_BIT_CT_KILL) {
1885e705c121SKalle Valo 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1886e705c121SKalle Valo 		isr_stats->ctkill++;
1887e705c121SKalle Valo 		handled |= CSR_INT_BIT_CT_KILL;
1888e705c121SKalle Valo 	}
1889e705c121SKalle Valo 
1890e705c121SKalle Valo 	/* Error detected by uCode */
1891e705c121SKalle Valo 	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n", inta);
1894e705c121SKalle Valo 		isr_stats->sw++;
1895e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1896e705c121SKalle Valo 		handled |= CSR_INT_BIT_SW_ERR;
1897e705c121SKalle Valo 	}
1898e705c121SKalle Valo 
1899e705c121SKalle Valo 	/* uCode wakes up after power-down sleep */
1900e705c121SKalle Valo 	if (inta & CSR_INT_BIT_WAKEUP) {
1901e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1902e705c121SKalle Valo 		iwl_pcie_rxq_check_wrptr(trans);
1903e705c121SKalle Valo 		iwl_pcie_txq_check_wrptrs(trans);
1904e705c121SKalle Valo 
1905e705c121SKalle Valo 		isr_stats->wakeup++;
1906e705c121SKalle Valo 
1907e705c121SKalle Valo 		handled |= CSR_INT_BIT_WAKEUP;
1908e705c121SKalle Valo 	}
1909e705c121SKalle Valo 
	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
1913e705c121SKalle Valo 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1914e705c121SKalle Valo 		    CSR_INT_BIT_RX_PERIODIC)) {
1915e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1916e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1917e705c121SKalle Valo 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1918e705c121SKalle Valo 			iwl_write32(trans, CSR_FH_INT_STATUS,
1919e705c121SKalle Valo 					CSR_FH_INT_RX_MASK);
1920e705c121SKalle Valo 		}
1921e705c121SKalle Valo 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1922e705c121SKalle Valo 			handled |= CSR_INT_BIT_RX_PERIODIC;
1923e705c121SKalle Valo 			iwl_write32(trans,
1924e705c121SKalle Valo 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1925e705c121SKalle Valo 		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an
		 * RX interrupt before the shared data changes reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */
1936e705c121SKalle Valo 
1937e705c121SKalle Valo 		/* Disable periodic interrupt; we use it as just a one-shot. */
1938e705c121SKalle Valo 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
1939e705c121SKalle Valo 			    CSR_INT_PERIODIC_DIS);
1940e705c121SKalle Valo 
1941e705c121SKalle Valo 		/*
1942e705c121SKalle Valo 		 * Enable periodic interrupt in 8 msec only if we received
1943e705c121SKalle Valo 		 * real RX interrupt (instead of just periodic int), to catch
1944e705c121SKalle Valo 		 * any dangling Rx interrupt.  If it was just the periodic
1945e705c121SKalle Valo 		 * interrupt, there was no dangling Rx activity, and no need
1946e705c121SKalle Valo 		 * to extend the periodic interrupt; one-shot is enough.
1947e705c121SKalle Valo 		 */
1948e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1949e705c121SKalle Valo 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
1950e705c121SKalle Valo 				   CSR_INT_PERIODIC_ENA);
1951e705c121SKalle Valo 
1952e705c121SKalle Valo 		isr_stats->rx++;
1953e705c121SKalle Valo 
1954e705c121SKalle Valo 		local_bh_disable();
19552e5d4a8fSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 0);
1956e705c121SKalle Valo 		local_bh_enable();
1957e705c121SKalle Valo 	}
1958e705c121SKalle Valo 
1959e705c121SKalle Valo 	/* This "Tx" DMA channel is used only for loading uCode */
1960e705c121SKalle Valo 	if (inta & CSR_INT_BIT_FH_TX) {
1961e705c121SKalle Valo 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1962e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1963e705c121SKalle Valo 		isr_stats->tx++;
1964e705c121SKalle Valo 		handled |= CSR_INT_BIT_FH_TX;
1965e705c121SKalle Valo 		/* Wake up uCode load routine, now that load is complete */
1966e705c121SKalle Valo 		trans_pcie->ucode_write_complete = true;
1967e705c121SKalle Valo 		wake_up(&trans_pcie->ucode_write_waitq);
1968e705c121SKalle Valo 	}
1969e705c121SKalle Valo 
1970e705c121SKalle Valo 	if (inta & ~handled) {
1971e705c121SKalle Valo 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1972e705c121SKalle Valo 		isr_stats->unhandled++;
1973e705c121SKalle Valo 	}
1974e705c121SKalle Valo 
1975e705c121SKalle Valo 	if (inta & ~(trans_pcie->inta_mask)) {
1976e705c121SKalle Valo 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1977e705c121SKalle Valo 			 inta & ~trans_pcie->inta_mask);
1978e705c121SKalle Valo 	}
1979e705c121SKalle Valo 
1980f16c3ebfSEmmanuel Grumbach 	spin_lock(&trans_pcie->irq_lock);
1981a6bd005fSEmmanuel Grumbach 	/* only Re-enable all interrupt if disabled by irq */
1982f16c3ebfSEmmanuel Grumbach 	if (test_bit(STATUS_INT_ENABLED, &trans->status))
1983f16c3ebfSEmmanuel Grumbach 		_iwl_enable_interrupts(trans);
1984f16c3ebfSEmmanuel Grumbach 	/* we are loading the firmware, enable FH_TX interrupt only */
1985f16c3ebfSEmmanuel Grumbach 	else if (handled & CSR_INT_BIT_FH_TX)
1986f16c3ebfSEmmanuel Grumbach 		iwl_enable_fw_load_int(trans);
1987e705c121SKalle Valo 	/* Re-enable RF_KILL if it occurred */
1988e705c121SKalle Valo 	else if (handled & CSR_INT_BIT_RF_KILL)
1989e705c121SKalle Valo 		iwl_enable_rfkill_int(trans);
1990ed3e4c6dSEmmanuel Grumbach 	/* Re-enable the ALIVE / Rx interrupt if it occurred */
1991ed3e4c6dSEmmanuel Grumbach 	else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
1992ed3e4c6dSEmmanuel Grumbach 		iwl_enable_fw_load_int_ctx_info(trans);
1993f16c3ebfSEmmanuel Grumbach 	spin_unlock(&trans_pcie->irq_lock);
1994e705c121SKalle Valo 
1995e705c121SKalle Valo out:
1996e705c121SKalle Valo 	lock_map_release(&trans->sync_cmd_lockdep_map);
1997e705c121SKalle Valo 	return IRQ_HANDLED;
1998e705c121SKalle Valo }
1999e705c121SKalle Valo 
2000e705c121SKalle Valo /******************************************************************************
2001e705c121SKalle Valo  *
2002e705c121SKalle Valo  * ICT functions
2003e705c121SKalle Valo  *
2004e705c121SKalle Valo  ******************************************************************************/
2005e705c121SKalle Valo 
2006e705c121SKalle Valo /* Free dram table */
2007e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans)
2008e705c121SKalle Valo {
2009e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2010e705c121SKalle Valo 
2011e705c121SKalle Valo 	if (trans_pcie->ict_tbl) {
2012e705c121SKalle Valo 		dma_free_coherent(trans->dev, ICT_SIZE,
2013e705c121SKalle Valo 				  trans_pcie->ict_tbl,
2014e705c121SKalle Valo 				  trans_pcie->ict_tbl_dma);
2015e705c121SKalle Valo 		trans_pcie->ict_tbl = NULL;
2016e705c121SKalle Valo 		trans_pcie->ict_tbl_dma = 0;
2017e705c121SKalle Valo 	}
2018e705c121SKalle Valo }
2019e705c121SKalle Valo 
/*
 * Allocate the DRAM shared table; it is an aligned memory block of
 * ICT_SIZE. Also reset all data related to the ICT table interrupt.
 */
2025e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2026e705c121SKalle Valo {
2027e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2028e705c121SKalle Valo 
2029e705c121SKalle Valo 	trans_pcie->ict_tbl =
2030750afb08SLuis Chamberlain 		dma_alloc_coherent(trans->dev, ICT_SIZE,
2031750afb08SLuis Chamberlain 				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2032e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
2033e705c121SKalle Valo 		return -ENOMEM;
2034e705c121SKalle Valo 
2035e705c121SKalle Valo 	/* just an API sanity check ... it is guaranteed to be aligned */
2036e705c121SKalle Valo 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2037e705c121SKalle Valo 		iwl_pcie_free_ict(trans);
2038e705c121SKalle Valo 		return -EINVAL;
2039e705c121SKalle Valo 	}
2040e705c121SKalle Valo 
2041e705c121SKalle Valo 	return 0;
2042e705c121SKalle Valo }
2043e705c121SKalle Valo 
/* The device is going up; inform it that we are using the ICT interrupt
 * table, and switch the driver over to using the ICT interrupt.
 */
2047e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans)
2048e705c121SKalle Valo {
2049e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2050e705c121SKalle Valo 	u32 val;
2051e705c121SKalle Valo 
2052e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
2053e705c121SKalle Valo 		return;
2054e705c121SKalle Valo 
2055e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
2056f16c3ebfSEmmanuel Grumbach 	_iwl_disable_interrupts(trans);
2057e705c121SKalle Valo 
2058e705c121SKalle Valo 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2059e705c121SKalle Valo 
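	/* The register takes the table's physical page number, hence the
	 * shift by ICT_SHIFT (the table is ICT_SIZE aligned, i.e. 4 KiB).
	 */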
2060e705c121SKalle Valo 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2061e705c121SKalle Valo 
2062e705c121SKalle Valo 	val |= CSR_DRAM_INT_TBL_ENABLE |
2063e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
2064e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
2065e705c121SKalle Valo 
2066e705c121SKalle Valo 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2067e705c121SKalle Valo 
2068e705c121SKalle Valo 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2069e705c121SKalle Valo 	trans_pcie->use_ict = true;
2070e705c121SKalle Valo 	trans_pcie->ict_index = 0;
2071e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2072f16c3ebfSEmmanuel Grumbach 	_iwl_enable_interrupts(trans);
2073e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
2074e705c121SKalle Valo }
2075e705c121SKalle Valo 
/* The device is going down; disable ICT interrupt usage */
2077e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans)
2078e705c121SKalle Valo {
2079e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2080e705c121SKalle Valo 
2081e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
2082e705c121SKalle Valo 	trans_pcie->use_ict = false;
2083e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
2084e705c121SKalle Valo }
2085e705c121SKalle Valo 
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

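/*
 * Hook-up sketch (illustrative only; the real registration lives in the
 * transport setup code): iwl_pcie_isr() is the hard-IRQ half of a
 * threaded interrupt.  It masks the NIC and returns IRQ_WAKE_THREAD so
 * the threaded half, iwl_pcie_irq_handler(), does the actual work in
 * process context.  Assuming <linux/interrupt.h>, wiring it up looks
 * roughly like:
 *
 *	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 *				   iwl_pcie_irq_handler,
 *				   IRQF_SHARED, DRV_NAME, trans);
 */
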
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

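/*
 * Threaded handler for MSI-X vectors.  The flow below is: read the FH
 * and HW cause registers under irq_lock and write the values back to
 * ack them, mask off causes this vector is not mapped to, dispatch the
 * RX, uCode-load, error, wakeup, CT-kill and RF-kill causes, and
 * finally re-enable the vector via iwl_pcie_clear_irq().
 */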
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}

	inta_fh &= trans_pcie->fh_mask;

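	/*
	 * RX dispatch: on a shared vector, FH cause bits Q0/Q1 indicate
	 * which RX queue fired; the handlers run with bottom halves
	 * disabled, like the rest of the RX path.
	 */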
	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 1);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up the uCode load routine,
		 * now that the load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking the FH register, check the HW register */
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}

	inta_hw &= trans_pcie->hw_mask;

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->trans_cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		u32 sleep_notif =
			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
			IWL_DEBUG_ISR(trans,
				      "Sx interrupt: sleep notification = 0x%x\n",
				      sleep_notif);
			trans_pcie->sx_complete = true;
			wake_up(&trans_pcie->sx_waitq);
		} else {
			/* uCode wakes up after power-down sleep */
			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
			iwl_pcie_rxq_check_wrptr(trans);
			iwl_pcie_txq_check_wrptrs(trans);

			isr_stats->wakeup++;
		}
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		trans->dbg.hw_error = true;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
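
/*
 * MSI-X hook-up sketch (illustrative only; the real allocation and
 * registration live in the transport setup code and also handle the
 * shared-vector fallback).  Each vector uses iwl_pcie_msix_isr() as the
 * hard handler and iwl_pcie_irq_msix_handler() as the threaded handler,
 * with the msix_entry as dev_id so the thread can tell vectors apart:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, max_vecs, PCI_IRQ_MSIX);
 *	for (i = 0; i < nvec; i++)
 *		request_threaded_irq(pci_irq_vector(pdev, i),
 *				     iwl_pcie_msix_isr,
 *				     iwl_pcie_irq_msix_handler, 0,
 *				     "iwlwifi-msix",
 *				     &trans_pcie->msix_entries[i]);
 */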