/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * slot preceding WRITE is the last one into which the firmware can place a
 * packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets the READ index to the first position
 * and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows (a worked example follows this
 * comment):
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator's
 *   empty list.  Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When another six RBDs are used - they are transferred to the allocator's
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free.  If the claim fails - it keeps retrying
 *   until the buffers are ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator keeps an initial pool of num_queues * (8 - 2) RBDs - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs, and there is no guarantee when the
 *   other 6 are supplied).
 *   The queues supply the recycling of the remaining RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver's 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   Once there are enough free buffers again, RX_STALLED is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detaches iwl_rx_mem_buffers from the pool up to
 *                            the READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
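
/*
 * Illustrative walkthrough of the allocator hand-off described above.
 * The numbers assume RX_POST_REQ_ALLOC = 2 and RX_CLAIM_REQ_ALLOC = 8,
 * matching the "2 empty RBDs" / "eight buffers" figures in the comment:
 *
 *   1. The stack steals pages; once 2 RBDs are used they move to
 *      rba->rbd_empty, req_pending is bumped and the work item is
 *      kicked so the allocator can start filling 8 pages.
 *   2. After 6 more RBDs are used (8 in total), the queue tries to
 *      claim the 8 freshly allocated RBDs (req_ready > 0) and puts
 *      them on rx_free.
 *   3. With 8+ entries on rx_free, restock hands them back to the HW
 *      and advances the WRITE index.
 */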

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
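
/*
 * Worked example for the free-space computation above (illustrative
 * values, not part of the driver flow): with queue_size = 256,
 * read = 10 and write = 250,
 *	(10 - 250 - 1) & 255 = (-241) & 255 = 15
 * i.e. 15 slots may still be restocked before write would catch up
 * with read - 1.
 */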

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
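
/*
 * Example (illustrative): the RBD pointer format drops the low 8 bits,
 * so the receive buffer must be at least 256-byte aligned.  A DMA
 * address of 0x123456700 becomes the RBD word 0x01234567; the shift is
 * also what lets a 36-bit address fit the 32-bit descriptor.
 */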

static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
{
	iwl_write_prph(trans, ofs, val & 0xffffffff);
	iwl_write_prph(trans, ofs + 4, val >> 32);
}
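
/*
 * Example (illustrative): iwl_pcie_write_prph_64(trans, ofs,
 * 0x11223344aabbccddULL) issues two 32-bit writes - 0xaabbccdd to ofs
 * (low word first) and 0x11223344 to ofs + 4.
 */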

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
			       rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
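
/*
 * Note on the bd[] entry built above (illustrative values): page_dma is
 * page-aligned, so its low 12 bits are zero (hence the WARN_ON) and the
 * buffer ID fits there.  E.g. page_dma = 0xabcd000 and vid = 0x2a yield
 * the descriptor 0x0abcd02a; vid is the rxb's index into
 * trans_pcie->global_table (set up in iwl_pcie_rx_init), which lets the
 * driver map a completed descriptor back to its iwl_rx_mem_buffer.
 */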

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
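
/*
 * Example of the write-pointer batching used above (illustrative
 * values): the device is only told about the write pointer in
 * multiples of 8.  With write_actual = 8, advancing rxq->write to 13
 * changes nothing (13 & ~7 == 8); once write reaches 16, the check
 * fires and write_actual becomes round_down(16, 8) = 16.  This batches
 * MMIO writes at the cost of up to 7 restocked RBDs the HW does not
 * yet know about.
 */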

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < MQ_RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received allocation request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
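
/*
 * Illustrative sketch of the req_pending/req_ready handshake driven by
 * the worker above: a queue posts a request by bumping rba->req_pending
 * and kicking the work item.  The worker turns one pending request into
 * RX_CLAIM_REQ_ALLOC mapped pages on rba->rbd_allocated and then bumps
 * rba->req_ready; a queue later consumes one "ready" unit via
 * atomic_dec_if_positive() and claims exactly that many buffers.
 * Re-reading req_pending when it reaches zero lets a single scheduled
 * run also serve requests posted while the worker was allocating.
 */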

/*
 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
 *
 * Called by a queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 */
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				     struct iwl_rx_mem_buffer
				     *out[RX_CLAIM_REQ_ALLOC])
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return -ENOMEM, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOMEM;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		out[i] = list_first_entry(&rba->rbd_allocated,
			       struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}

static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -EINVAL;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					     free_size * rxq->queue_size,
					     &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/* Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
				       (u64)(rxq->bd_dma));
		/* Tell device where to find RBD used table in DRAM */
		iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
				       (u64)(rxq->used_bd_dma));
		/* Tell device where in DRAM to update its Rx status */
		iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
				       rxq->rb_stts_dma);
		/* Reset device's index tables */
		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/* restock default queue */
	iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);

	/*
	 * Enable Rx DMA
	 * Single frame mode
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * 512 RBDs
	 */
	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
		       RFH_DMA_EN_ENABLE_VAL |
		       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
		       RFH_RXF_DMA_MIN_RB_4_8 |
		       RFH_RXF_DMA_RBDCB_SIZE_512);

	iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
					  RFH_GEN_CFG_SERVICE_DMA_SNOOP);
	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
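
/*
 * Example of the RXQ enable mask built above (illustrative): each queue
 * i sets BIT(i) and BIT(i + 16) - presumably the low and high halves of
 * RFH_RXF_RXQ_ACTIVE pair up with the "free and used rx queue
 * operation" being disabled earlier.  With 2 RX queues this yields
 * enabled = 0x00030003.
 */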

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, num_rbds, allocator_pool_size;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	num_rbds = trans->cfg->mq_rx_supported ?
		     MQ_RX_POOL_SIZE : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	for (i = 0; i < num_rbds; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)i;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
	if (trans->cfg->mq_rx_supported) {
		iwl_pcie_rx_mq_hw_init(trans, def_rxq);
	} else {
		iwl_pcie_rxq_restock(trans, def_rxq);
		iwl_pcie_rx_hw_init(trans, def_rxq);
	}

	spin_lock(&def_rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
	spin_unlock(&def_rxq->lock);

	return 0;
}
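
/*
 * Illustrative pool split for the init path above (4 RX queues is an
 * assumed, not mandated, count):
 *	allocator_pool_size = 4 * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC)
 *			    = 4 * (8 - 2) = 24
 * so the first 24 RBDs of rx_pool seed rba->rbd_empty and every
 * remaining RBD starts life on the default queue's rx_used list,
 * waiting for a page from iwl_pcie_rxq_alloc_rbs().
 */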

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(trans->dev,
					  free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);
		else
			IWL_DEBUG_INFO(trans,
				       "Free rxq->rb_stts which is NULL\n");

		if (rxq->used_bd)
			dma_free_coherent(trans->dev,
					  sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list; it will be moved to the allocator
	 * in batches before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers,
	 * after which we still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion
		 */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
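
/*
 * Example of the posting condition above (illustrative): with
 * RX_CLAIM_REQ_ALLOC = 8 and RX_POST_REQ_ALLOC = 2, a request is posted
 * whenever used_count % 8 == 2, i.e. at used_count = 2, 10, 18, ... -
 * two RBDs ride along with each request and the allocator tops the
 * batch up to 8 from its own pool.
 */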
986e705c121SKalle Valo 
987e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
98878485054SSara Sharon 				struct iwl_rxq *rxq,
989e705c121SKalle Valo 				struct iwl_rx_mem_buffer *rxb,
990e705c121SKalle Valo 				bool emergency)
991e705c121SKalle Valo {
992e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
993e705c121SKalle Valo 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
994e705c121SKalle Valo 	bool page_stolen = false;
995e705c121SKalle Valo 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
996e705c121SKalle Valo 	u32 offset = 0;
997e705c121SKalle Valo 
998e705c121SKalle Valo 	if (WARN_ON(!rxb))
999e705c121SKalle Valo 		return;
1000e705c121SKalle Valo 
1001e705c121SKalle Valo 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1002e705c121SKalle Valo 
1003e705c121SKalle Valo 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1004e705c121SKalle Valo 		struct iwl_rx_packet *pkt;
1005e705c121SKalle Valo 		u16 sequence;
1006e705c121SKalle Valo 		bool reclaim;
1007e705c121SKalle Valo 		int index, cmd_index, len;
1008e705c121SKalle Valo 		struct iwl_rx_cmd_buffer rxcb = {
1009e705c121SKalle Valo 			._offset = offset,
1010e705c121SKalle Valo 			._rx_page_order = trans_pcie->rx_page_order,
1011e705c121SKalle Valo 			._page = rxb->page,
1012e705c121SKalle Valo 			._page_stolen = false,
1013e705c121SKalle Valo 			.truesize = max_len,
1014e705c121SKalle Valo 		};
1015e705c121SKalle Valo 
1016e705c121SKalle Valo 		pkt = rxb_addr(&rxcb);
1017e705c121SKalle Valo 
1018e705c121SKalle Valo 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
1019e705c121SKalle Valo 			break;
1020e705c121SKalle Valo 
1021e705c121SKalle Valo 		IWL_DEBUG_RX(trans,
1022e705c121SKalle Valo 			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
1023e705c121SKalle Valo 			     rxcb._offset,
102439bdb17eSSharon Dvir 			     iwl_get_cmd_string(trans,
102539bdb17eSSharon Dvir 						iwl_cmd_id(pkt->hdr.cmd,
102639bdb17eSSharon Dvir 							   pkt->hdr.group_id,
102739bdb17eSSharon Dvir 							   0)),
1028e705c121SKalle Valo 			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
1029e705c121SKalle Valo 
1030e705c121SKalle Valo 		len = iwl_rx_packet_len(pkt);
1031e705c121SKalle Valo 		len += sizeof(u32); /* account for status word */
1032e705c121SKalle Valo 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1033e705c121SKalle Valo 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1034e705c121SKalle Valo 
1035e705c121SKalle Valo 		/* Reclaim a command buffer only if this packet is a response
1036e705c121SKalle Valo 		 *   to a (driver-originated) command.
1037e705c121SKalle Valo 		 * If the packet (e.g. Rx frame) originated from uCode,
1038e705c121SKalle Valo 		 *   there is no command buffer to reclaim.
1039e705c121SKalle Valo 		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1040e705c121SKalle Valo 		 *   but apparently a few don't get set; catch them here. */
1041e705c121SKalle Valo 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1042e705c121SKalle Valo 		if (reclaim) {
1043e705c121SKalle Valo 			int i;
1044e705c121SKalle Valo 
1045e705c121SKalle Valo 			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1046e705c121SKalle Valo 				if (trans_pcie->no_reclaim_cmds[i] ==
1047e705c121SKalle Valo 							pkt->hdr.cmd) {
1048e705c121SKalle Valo 					reclaim = false;
1049e705c121SKalle Valo 					break;
1050e705c121SKalle Valo 				}
1051e705c121SKalle Valo 			}
1052e705c121SKalle Valo 		}
1053e705c121SKalle Valo 
1054e705c121SKalle Valo 		sequence = le16_to_cpu(pkt->hdr.sequence);
1055e705c121SKalle Valo 		index = SEQ_TO_INDEX(sequence);
1056e705c121SKalle Valo 		cmd_index = get_cmd_index(&txq->q, index);
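		/*
		 * Editorial note (hedged): the low bits of the sequence field
		 * encode the TFD index of the originating host command, so a
		 * response can be mapped back to its command-queue entry via
		 * SEQ_TO_INDEX() and then wrapped to the ring by
		 * get_cmd_index().
		 */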
1057e705c121SKalle Valo 
1058e705c121SKalle Valo 		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
1059e705c121SKalle Valo 
1060e705c121SKalle Valo 		if (reclaim) {
1061e705c121SKalle Valo 			kzfree(txq->entries[cmd_index].free_buf);
1062e705c121SKalle Valo 			txq->entries[cmd_index].free_buf = NULL;
1063e705c121SKalle Valo 		}
1064e705c121SKalle Valo 
1065e705c121SKalle Valo 		/*
1066e705c121SKalle Valo 		 * After here, we should always check rxcb._page_stolen,
1067e705c121SKalle Valo 		 * if it is true then one of the handlers took the page.
1068e705c121SKalle Valo 		 */
1069e705c121SKalle Valo 
1070e705c121SKalle Valo 		if (reclaim) {
1071e705c121SKalle Valo 			/* Invoke any callbacks, transfer the buffer to caller,
1072e705c121SKalle Valo 			 * and fire off the (possibly) blocking
1073e705c121SKalle Valo 			 * iwl_trans_send_cmd()
1074e705c121SKalle Valo 			 * as we reclaim the driver command queue */
1075e705c121SKalle Valo 			if (!rxcb._page_stolen)
1076e705c121SKalle Valo 				iwl_pcie_hcmd_complete(trans, &rxcb);
1077e705c121SKalle Valo 			else
1078e705c121SKalle Valo 				IWL_WARN(trans, "Claim null rxb?\n");
1079e705c121SKalle Valo 		}
1080e705c121SKalle Valo 
1081e705c121SKalle Valo 		page_stolen |= rxcb._page_stolen;
1082e705c121SKalle Valo 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1083e705c121SKalle Valo 	}
1084e705c121SKalle Valo 
1085e705c121SKalle Valo 	/* page was stolen from us -- free our reference */
1086e705c121SKalle Valo 	if (page_stolen) {
1087e705c121SKalle Valo 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1088e705c121SKalle Valo 		rxb->page = NULL;
1089e705c121SKalle Valo 	}
1090e705c121SKalle Valo 
1091e705c121SKalle Valo 	/* Reuse the page if possible. For notification packets and
1092e705c121SKalle Valo 	 * SKBs that fail to Rx correctly, add them back into the
1093e705c121SKalle Valo 	 * rx_free list for reuse later. */
1094e705c121SKalle Valo 	if (rxb->page != NULL) {
1095e705c121SKalle Valo 		rxb->page_dma =
1096e705c121SKalle Valo 			dma_map_page(trans->dev, rxb->page, 0,
1097e705c121SKalle Valo 				     PAGE_SIZE << trans_pcie->rx_page_order,
1098e705c121SKalle Valo 				     DMA_FROM_DEVICE);
1099e705c121SKalle Valo 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1100e705c121SKalle Valo 			/*
1101e705c121SKalle Valo 			 * free the page(s) as well to not break
1102e705c121SKalle Valo 			 * the invariant that the items on the used
1103e705c121SKalle Valo 			 * list have no page(s)
1104e705c121SKalle Valo 			 */
1105e705c121SKalle Valo 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1106e705c121SKalle Valo 			rxb->page = NULL;
1107e705c121SKalle Valo 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1108e705c121SKalle Valo 		} else {
1109e705c121SKalle Valo 			list_add_tail(&rxb->list, &rxq->rx_free);
1110e705c121SKalle Valo 			rxq->free_count++;
1111e705c121SKalle Valo 		}
1112e705c121SKalle Valo 	} else
1113e705c121SKalle Valo 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1114e705c121SKalle Valo }
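/*
 * Illustrative sketch (editorial, not driver code): a single receive
 * buffer page may carry several packets back to back, each aligned to
 * FH_RSCSR_FRAME_ALIGN. The walk in iwl_pcie_rx_handle_rb() above is
 * conceptually:
 *
 *	offset = 0;
 *	while (minimal packet header still fits at offset) {
 *		pkt = page + offset;
 *		if (pkt marked FH_RSCSR_FRAME_INVALID)
 *			break;
 *		deliver pkt to the op mode;
 *		offset += ALIGN(packet len + status word,
 *				FH_RSCSR_FRAME_ALIGN);
 *	}
 */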
1115e705c121SKalle Valo 
1116e705c121SKalle Valo /*
1117e705c121SKalle Valo  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1118e705c121SKalle Valo  */
1119e705c121SKalle Valo static void iwl_pcie_rx_handle(struct iwl_trans *trans)
1120e705c121SKalle Valo {
1121e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
112278485054SSara Sharon 	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
1123e705c121SKalle Valo 	u32 r, i, j, count = 0;
1124e705c121SKalle Valo 	bool emergency = false;
1125e705c121SKalle Valo 
1126e705c121SKalle Valo restart:
1127e705c121SKalle Valo 	spin_lock(&rxq->lock);
1128e705c121SKalle Valo 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1129e705c121SKalle Valo 	 * buffer that the driver may process (last buffer filled by ucode). */
1130e705c121SKalle Valo 	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1131e705c121SKalle Valo 	i = rxq->read;
1132e705c121SKalle Valo 
1133e705c121SKalle Valo 	/* Rx interrupt, but nothing sent from uCode */
1134e705c121SKalle Valo 	if (i == r)
1135e705c121SKalle Valo 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
1136e705c121SKalle Valo 
1137e705c121SKalle Valo 	while (i != r) {
1138e705c121SKalle Valo 		struct iwl_rx_mem_buffer *rxb;
1139e705c121SKalle Valo 
114096a6497bSSara Sharon 		if (unlikely(rxq->used_count == rxq->queue_size / 2))
1141e705c121SKalle Valo 			emergency = true;
1142e705c121SKalle Valo 
114396a6497bSSara Sharon 		if (trans->cfg->mq_rx_supported) {
114496a6497bSSara Sharon 			/*
114596a6497bSSara Sharon 			 * used_bd entries are 32 bits wide, but only the
114696a6497bSSara Sharon 			 * low 12 bits are used to retrieve the vid
114796a6497bSSara Sharon 			 */
114896a6497bSSara Sharon 			u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]);
114996a6497bSSara Sharon 
115096a6497bSSara Sharon 			rxb = trans_pcie->global_table[vid];
115196a6497bSSara Sharon 		} else {
1152e705c121SKalle Valo 			rxb = rxq->queue[i];
1153e705c121SKalle Valo 			rxq->queue[i] = NULL;
115496a6497bSSara Sharon 		}
1155e705c121SKalle Valo 
1156f02d2ccdSJohannes Berg 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
115778485054SSara Sharon 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1158e705c121SKalle Valo 
115996a6497bSSara Sharon 		i = (i + 1) & (rxq->queue_size - 1);
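		/*
		 * Worked example (editorial): queue_size is a power of two,
		 * so the mask implements the wraparound; with a queue of
		 * 256 entries, (255 + 1) & 255 == 0.
		 */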
1160e705c121SKalle Valo 
1161e705c121SKalle Valo 		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1162e705c121SKalle Valo 		 * try to claim the pre-allocated buffers from the allocator */
1163e705c121SKalle Valo 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
1164e705c121SKalle Valo 			struct iwl_rb_allocator *rba = &trans_pcie->rba;
1165e705c121SKalle Valo 			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
1166e705c121SKalle Valo 
1167e705c121SKalle Valo 			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
1168e705c121SKalle Valo 			    !emergency) {
1169e705c121SKalle Valo 				/* Add the remaining 6 empty RBDs
1170e705c121SKalle Valo 				 * for allocator use
1171e705c121SKalle Valo 				 */
1172e705c121SKalle Valo 				spin_lock(&rba->lock);
1173e705c121SKalle Valo 				list_splice_tail_init(&rxq->rx_used,
1174e705c121SKalle Valo 						      &rba->rbd_empty);
1175e705c121SKalle Valo 				spin_unlock(&rba->lock);
1176e705c121SKalle Valo 			}
1177e705c121SKalle Valo 
1178e705c121SKalle Valo 			/* If not ready - continue, we will try to claim them
1179e705c121SKalle Valo 			 * later. No need to reschedule work - the allocator
1180e705c121SKalle Valo 			 * exits only on success. */
1181e705c121SKalle Valo 			if (!iwl_pcie_rx_allocator_get(trans, out)) {
1182e705c121SKalle Valo 				/* If success - then RX_CLAIM_REQ_ALLOC
1183e705c121SKalle Valo 				 * buffers were retrieved and should be added
1184e705c121SKalle Valo 				 * to free list */
1185e705c121SKalle Valo 				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
1186e705c121SKalle Valo 				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
1187e705c121SKalle Valo 					list_add_tail(&out[j]->list,
1188e705c121SKalle Valo 						      &rxq->rx_free);
1189e705c121SKalle Valo 					rxq->free_count++;
1190e705c121SKalle Valo 				}
1191e705c121SKalle Valo 			}
1192e705c121SKalle Valo 		}
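		/*
		 * Worked example (editorial; constants assumed from this
		 * driver's internal.h): with RX_CLAIM_REQ_ALLOC == 8 and
		 * RX_POST_REQ_ALLOC == 2, two RBDs were already handed over
		 * when the allocation request was posted, so the splice
		 * above contributes the "remaining 6" mentioned earlier, and
		 * a successful claim returns 8 freshly allocated RBDs.
		 */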
1193e705c121SKalle Valo 		if (emergency) {
1194e705c121SKalle Valo 			count++;
1195e705c121SKalle Valo 			if (count == 8) {
1196e705c121SKalle Valo 				count = 0;
119796a6497bSSara Sharon 				if (rxq->used_count < rxq->queue_size / 3)
1198e705c121SKalle Valo 					emergency = false;
1199e705c121SKalle Valo 				spin_unlock(&rxq->lock);
120078485054SSara Sharon 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1201e705c121SKalle Valo 				spin_lock(&rxq->lock);
1202e705c121SKalle Valo 			}
1203e705c121SKalle Valo 		}
1204e705c121SKalle Valo 		/* handle restock for three cases, can be all of them at once:
1205e705c121SKalle Valo 		 * - we just pulled buffers from the allocator
1206e705c121SKalle Valo 		 * - we have 8+ unstolen pages accumulated
1207e705c121SKalle Valo 		 * - we are in emergency and allocated buffers
1208e705c121SKalle Valo 		 */
1209e705c121SKalle Valo 		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
1210e705c121SKalle Valo 			rxq->read = i;
1211e705c121SKalle Valo 			spin_unlock(&rxq->lock);
121296a6497bSSara Sharon 			if (trans->cfg->mq_rx_supported)
121396a6497bSSara Sharon 				iwl_pcie_rxq_mq_restock(trans, rxq);
121496a6497bSSara Sharon 			else
121578485054SSara Sharon 				iwl_pcie_rxq_restock(trans, rxq);
1216e705c121SKalle Valo 			goto restart;
1217e705c121SKalle Valo 		}
1218e705c121SKalle Valo 	}
1219e705c121SKalle Valo 
1220e705c121SKalle Valo 	/* Record how far we have read before releasing the lock */
1221e705c121SKalle Valo 	rxq->read = i;
1222e705c121SKalle Valo 	spin_unlock(&rxq->lock);
1223e705c121SKalle Valo 
1224e705c121SKalle Valo 	/*
1225e705c121SKalle Valo 	 * Handle a case where in emergency there are some unallocated RBDs.
1226e705c121SKalle Valo 	 * Those RBDs are in the used list, but are not tracked by the queue's
1227e705c121SKalle Valo 	 * used_count, which only counts allocator-owned RBDs.
1228e705c121SKalle Valo 	 * Unallocated emergency RBDs must be allocated on exit, otherwise,
1229e705c121SKalle Valo 	 * when this function is called again, it may not be in emergency mode
1230e705c121SKalle Valo 	 * and they will be handed to the allocator with no tracking in the
1231e705c121SKalle Valo 	 * RBD allocator counters, which would lead to them never being
1232e705c121SKalle Valo 	 * claimed back by the queue.
1233e705c121SKalle Valo 	 * By allocating them here, they are now on the queue's free list and
1234e705c121SKalle Valo 	 * will be restocked by the next call of iwl_pcie_rxq_restock.
1235e705c121SKalle Valo 	 */
1236e705c121SKalle Valo 	if (unlikely(emergency && count))
123778485054SSara Sharon 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1238e705c121SKalle Valo 
1239e705c121SKalle Valo 	if (trans_pcie->napi.poll)
1240e705c121SKalle Valo 		napi_gro_flush(&trans_pcie->napi, false);
1241e705c121SKalle Valo }
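/*
 * Illustrative sketch (editorial, not driver code) of the index
 * handshake used by iwl_pcie_rx_handle() above -- the device publishes
 * how far it has written, the driver consumes up to that point and then
 * restocks fresh buffers:
 *
 *	r = le16_to_cpu(closed_rb_num) & 0x0FFF;   // device write position
 *	for (i = rxq->read; i != r; i = (i + 1) & (queue_size - 1))
 *		handle entry i;
 *	rxq->read = i;                             // consumed up to here
 *	restock the queue;                         // return buffers to HW
 */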
1242e705c121SKalle Valo 
1243e705c121SKalle Valo /*
1244e705c121SKalle Valo  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1245e705c121SKalle Valo  */
1246e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1247e705c121SKalle Valo {
1248e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1249e705c121SKalle Valo 	int i;
1250e705c121SKalle Valo 
1251e705c121SKalle Valo 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1252e705c121SKalle Valo 	if (trans->cfg->internal_wimax_coex &&
1253e705c121SKalle Valo 	    !trans->cfg->apmg_not_supported &&
1254e705c121SKalle Valo 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1255e705c121SKalle Valo 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1256e705c121SKalle Valo 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1257e705c121SKalle Valo 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1258e705c121SKalle Valo 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1259e705c121SKalle Valo 		iwl_op_mode_wimax_active(trans->op_mode);
1260e705c121SKalle Valo 		wake_up(&trans_pcie->wait_command_queue);
1261e705c121SKalle Valo 		return;
1262e705c121SKalle Valo 	}
1263e705c121SKalle Valo 
1264e705c121SKalle Valo 	iwl_pcie_dump_csr(trans);
1265e705c121SKalle Valo 	iwl_dump_fh(trans, NULL);
1266e705c121SKalle Valo 
1267e705c121SKalle Valo 	local_bh_disable();
1268e705c121SKalle Valo 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
1269e705c121SKalle Valo 	 * before we wake up the command caller, to ensure a proper cleanup. */
1270e705c121SKalle Valo 	iwl_trans_fw_error(trans);
1271e705c121SKalle Valo 	local_bh_enable();
1272e705c121SKalle Valo 
1273e705c121SKalle Valo 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
1274e705c121SKalle Valo 		del_timer(&trans_pcie->txq[i].stuck_timer);
1275e705c121SKalle Valo 
1276e705c121SKalle Valo 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1277e705c121SKalle Valo 	wake_up(&trans_pcie->wait_command_queue);
1278e705c121SKalle Valo }
1279e705c121SKalle Valo 
1280e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1281e705c121SKalle Valo {
1282e705c121SKalle Valo 	u32 inta;
1283e705c121SKalle Valo 
1284e705c121SKalle Valo 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1285e705c121SKalle Valo 
1286e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1287e705c121SKalle Valo 
1288e705c121SKalle Valo 	/* Discover which interrupts are active/pending */
1289e705c121SKalle Valo 	inta = iwl_read32(trans, CSR_INT);
1290e705c121SKalle Valo 
1291e705c121SKalle Valo 	/* the thread will service interrupts and re-enable them */
1292e705c121SKalle Valo 	return inta;
1293e705c121SKalle Valo }
1294e705c121SKalle Valo 
1295e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1296e705c121SKalle Valo #define ICT_SHIFT	12
1297e705c121SKalle Valo #define ICT_SIZE	(1 << ICT_SHIFT)
1298e705c121SKalle Valo #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
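/*
 * Worked arithmetic (editorial): with one 4096-byte device page and
 * 32-bit entries, ICT_COUNT = ICT_SIZE / sizeof(u32) = 4096 / 4 = 1024
 * slots, so the index wraps with the power-of-two mask
 * (ICT_COUNT - 1) == 0x3FF.
 */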
1299e705c121SKalle Valo 
1300e705c121SKalle Valo /* Interrupt handler using the ICT table. With this scheme the driver
1301e705c121SKalle Valo  * stops reading the INTA register to discover the device's interrupts,
1302e705c121SKalle Valo  * as reading that register is expensive. Instead, the device writes its
1303e705c121SKalle Valo  * interrupts into the ICT table in DRAM, increments the index and fires
1304e705c121SKalle Valo  * an interrupt. The driver ORs all ICT entries from the current index
1305e705c121SKalle Valo  * up to the first 0-valued entry; the result is the interrupt we need
1306e705c121SKalle Valo  * to service. The driver sets those entries back to 0 and updates the
1307e705c121SKalle Valo  * index. */
1308e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1309e705c121SKalle Valo {
1310e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1311e705c121SKalle Valo 	u32 inta;
1312e705c121SKalle Valo 	u32 val = 0;
1313e705c121SKalle Valo 	u32 read;
1314e705c121SKalle Valo 
1315e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1316e705c121SKalle Valo 
1317e705c121SKalle Valo 	/* Ignore interrupt if there's nothing in NIC to service.
1318e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1319e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC. */
1320e705c121SKalle Valo 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1321e705c121SKalle Valo 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1322e705c121SKalle Valo 	if (!read)
1323e705c121SKalle Valo 		return 0;
1324e705c121SKalle Valo 
1325e705c121SKalle Valo 	/*
1326e705c121SKalle Valo 	 * Collect all entries up to the first 0, starting from ict_index;
1327e705c121SKalle Valo 	 * note we already read at ict_index.
1328e705c121SKalle Valo 	 */
1329e705c121SKalle Valo 	do {
1330e705c121SKalle Valo 		val |= read;
1331e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1332e705c121SKalle Valo 				trans_pcie->ict_index, read);
1333e705c121SKalle Valo 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1334e705c121SKalle Valo 		trans_pcie->ict_index =
1335e705c121SKalle Valo 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1336e705c121SKalle Valo 
1337e705c121SKalle Valo 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1338e705c121SKalle Valo 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1339e705c121SKalle Valo 					   read);
1340e705c121SKalle Valo 	} while (read);
1341e705c121SKalle Valo 
1342e705c121SKalle Valo 	/* We should not get this value, just ignore it. */
1343e705c121SKalle Valo 	if (val == 0xffffffff)
1344e705c121SKalle Valo 		val = 0;
1345e705c121SKalle Valo 
1346e705c121SKalle Valo 	/*
1347e705c121SKalle Valo 	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1348e705c121SKalle Valo 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1349e705c121SKalle Valo 	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1350e705c121SKalle Valo 	 * so we use them to decide on the real state of the Rx bit.
1351e705c121SKalle Valo 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1352e705c121SKalle Valo 	 */
1353e705c121SKalle Valo 	if (val & 0xC0000)
1354e705c121SKalle Valo 		val |= 0x8000;
1355e705c121SKalle Valo 
1356e705c121SKalle Valo 	inta = (0xff & val) | ((0xff00 & val) << 16);
1357e705c121SKalle Valo 	return inta;
1358e705c121SKalle Valo }
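/*
 * Worked example (editorial, illustrative value) of the remapping done
 * just above: the ICT entry packs two bytes of interrupt causes into a
 * 16-bit value, which is expanded back to CSR_INT bit positions. For
 * val == 0x8001:
 *
 *	inta = (0xff & 0x8001) | ((0xff00 & 0x8001) << 16)
 *	     = 0x00000001 | (0x8000 << 16)
 *	     = 0x80000001
 *
 * i.e. byte 0 stays in bits 0-7 and byte 1 moves to bits 24-31, where
 * the CSR_INT_BIT_* definitions expect it.
 */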
1359e705c121SKalle Valo 
1360e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1361e705c121SKalle Valo {
1362e705c121SKalle Valo 	struct iwl_trans *trans = dev_id;
1363e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1364e705c121SKalle Valo 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1365e705c121SKalle Valo 	u32 inta = 0;
1366e705c121SKalle Valo 	u32 handled = 0;
1367e705c121SKalle Valo 
1368e705c121SKalle Valo 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1369e705c121SKalle Valo 
1370e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1371e705c121SKalle Valo 
1372e705c121SKalle Valo 	/* If the DRAM interrupt table is not set up yet,
1373e705c121SKalle Valo 	 * fall back to the legacy INTA register.
1374e705c121SKalle Valo 	 */
1375e705c121SKalle Valo 	if (likely(trans_pcie->use_ict))
1376e705c121SKalle Valo 		inta = iwl_pcie_int_cause_ict(trans);
1377e705c121SKalle Valo 	else
1378e705c121SKalle Valo 		inta = iwl_pcie_int_cause_non_ict(trans);
1379e705c121SKalle Valo 
1380e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1381e705c121SKalle Valo 		IWL_DEBUG_ISR(trans,
1382e705c121SKalle Valo 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1383e705c121SKalle Valo 			      inta, trans_pcie->inta_mask,
1384e705c121SKalle Valo 			      iwl_read32(trans, CSR_INT_MASK),
1385e705c121SKalle Valo 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1386e705c121SKalle Valo 		if (inta & (~trans_pcie->inta_mask))
1387e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1388e705c121SKalle Valo 				      "We got a masked interrupt (0x%08x)\n",
1389e705c121SKalle Valo 				      inta & (~trans_pcie->inta_mask));
1390e705c121SKalle Valo 	}
1391e705c121SKalle Valo 
1392e705c121SKalle Valo 	inta &= trans_pcie->inta_mask;
1393e705c121SKalle Valo 
1394e705c121SKalle Valo 	/*
1395e705c121SKalle Valo 	 * Ignore interrupt if there's nothing in NIC to service.
1396e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1397e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC.
1398e705c121SKalle Valo 	 */
1399e705c121SKalle Valo 	if (unlikely(!inta)) {
1400e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1401e705c121SKalle Valo 		/*
1402e705c121SKalle Valo 		 * Re-enable interrupts here since we don't
1403e705c121SKalle Valo 		 * have anything to service
1404e705c121SKalle Valo 		 */
1405e705c121SKalle Valo 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1406e705c121SKalle Valo 			iwl_enable_interrupts(trans);
1407e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1408e705c121SKalle Valo 		lock_map_release(&trans->sync_cmd_lockdep_map);
1409e705c121SKalle Valo 		return IRQ_NONE;
1410e705c121SKalle Valo 	}
1411e705c121SKalle Valo 
1412e705c121SKalle Valo 	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1413e705c121SKalle Valo 		/*
1414e705c121SKalle Valo 		 * Hardware disappeared. It might have
1415e705c121SKalle Valo 		 * already raised an interrupt.
1416e705c121SKalle Valo 		 */
1417e705c121SKalle Valo 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1418e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1419e705c121SKalle Valo 		goto out;
1420e705c121SKalle Valo 	}
1421e705c121SKalle Valo 
1422e705c121SKalle Valo 	/* Ack/clear/reset pending uCode interrupts.
1423e705c121SKalle Valo 	 * Note: some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
1424e705c121SKalle Valo 	 *
1425e705c121SKalle Valo 	 * There is a hardware bug in the interrupt mask function that some
1426e705c121SKalle Valo 	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1427e705c121SKalle Valo 	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
1428e705c121SKalle Valo 	 * ICT interrupt handling mechanism has another bug that might cause
1429e705c121SKalle Valo 	 * these unmasked interrupts to fail to be detected. We work around
1430e705c121SKalle Valo 	 * the hardware bugs here by ACKing all the possible interrupts so
1431e705c121SKalle Valo 	 * that interrupt coalescing can still be achieved.
1432e705c121SKalle Valo 	 */
1433e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1434e705c121SKalle Valo 
1435e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR))
1436e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1437e705c121SKalle Valo 			      inta, iwl_read32(trans, CSR_INT_MASK));
1438e705c121SKalle Valo 
1439e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1440e705c121SKalle Valo 
1441e705c121SKalle Valo 	/* Now service all interrupt bits discovered above. */
1442e705c121SKalle Valo 	if (inta & CSR_INT_BIT_HW_ERR) {
1443e705c121SKalle Valo 		IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1444e705c121SKalle Valo 
1445e705c121SKalle Valo 		/* Tell the device to stop sending interrupts */
1446e705c121SKalle Valo 		iwl_disable_interrupts(trans);
1447e705c121SKalle Valo 
1448e705c121SKalle Valo 		isr_stats->hw++;
1449e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1450e705c121SKalle Valo 
1451e705c121SKalle Valo 		handled |= CSR_INT_BIT_HW_ERR;
1452e705c121SKalle Valo 
1453e705c121SKalle Valo 		goto out;
1454e705c121SKalle Valo 	}
1455e705c121SKalle Valo 
1456e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1457e705c121SKalle Valo 		/* NIC fires this, but we don't use it, redundant with WAKEUP */
1458e705c121SKalle Valo 		if (inta & CSR_INT_BIT_SCD) {
1459e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1460e705c121SKalle Valo 				      "Scheduler finished transmitting the frame(s).\n");
1461e705c121SKalle Valo 			isr_stats->sch++;
1462e705c121SKalle Valo 		}
1463e705c121SKalle Valo 
1464e705c121SKalle Valo 		/* Alive notification via Rx interrupt will do the real work */
1465e705c121SKalle Valo 		if (inta & CSR_INT_BIT_ALIVE) {
1466e705c121SKalle Valo 			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1467e705c121SKalle Valo 			isr_stats->alive++;
1468e705c121SKalle Valo 		}
1469e705c121SKalle Valo 	}
1470e705c121SKalle Valo 
1471e705c121SKalle Valo 	/* Safely ignore these bits for debug checks below */
1472e705c121SKalle Valo 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1473e705c121SKalle Valo 
1474e705c121SKalle Valo 	/* HW RF KILL switch toggled */
1475e705c121SKalle Valo 	if (inta & CSR_INT_BIT_RF_KILL) {
1476e705c121SKalle Valo 		bool hw_rfkill;
1477e705c121SKalle Valo 
1478e705c121SKalle Valo 		hw_rfkill = iwl_is_rfkill_set(trans);
1479e705c121SKalle Valo 		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1480e705c121SKalle Valo 			 hw_rfkill ? "disable radio" : "enable radio");
1481e705c121SKalle Valo 
1482e705c121SKalle Valo 		isr_stats->rfkill++;
1483e705c121SKalle Valo 
1484e705c121SKalle Valo 		mutex_lock(&trans_pcie->mutex);
1485e705c121SKalle Valo 		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1486e705c121SKalle Valo 		mutex_unlock(&trans_pcie->mutex);
1487e705c121SKalle Valo 		if (hw_rfkill) {
1488e705c121SKalle Valo 			set_bit(STATUS_RFKILL, &trans->status);
1489e705c121SKalle Valo 			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1490e705c121SKalle Valo 					       &trans->status))
1491e705c121SKalle Valo 				IWL_DEBUG_RF_KILL(trans,
1492e705c121SKalle Valo 						  "Rfkill while SYNC HCMD in flight\n");
1493e705c121SKalle Valo 			wake_up(&trans_pcie->wait_command_queue);
1494e705c121SKalle Valo 		} else {
1495e705c121SKalle Valo 			clear_bit(STATUS_RFKILL, &trans->status);
1496e705c121SKalle Valo 		}
1497e705c121SKalle Valo 
1498e705c121SKalle Valo 		handled |= CSR_INT_BIT_RF_KILL;
1499e705c121SKalle Valo 	}
1500e705c121SKalle Valo 
1501e705c121SKalle Valo 	/* Chip got too hot and stopped itself */
1502e705c121SKalle Valo 	if (inta & CSR_INT_BIT_CT_KILL) {
1503e705c121SKalle Valo 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1504e705c121SKalle Valo 		isr_stats->ctkill++;
1505e705c121SKalle Valo 		handled |= CSR_INT_BIT_CT_KILL;
1506e705c121SKalle Valo 	}
1507e705c121SKalle Valo 
1508e705c121SKalle Valo 	/* Error detected by uCode */
1509e705c121SKalle Valo 	if (inta & CSR_INT_BIT_SW_ERR) {
1510e705c121SKalle Valo 		IWL_ERR(trans, "Microcode SW error detected. "
1511e705c121SKalle Valo 			"Restarting 0x%X.\n", inta);
1512e705c121SKalle Valo 		isr_stats->sw++;
1513e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1514e705c121SKalle Valo 		handled |= CSR_INT_BIT_SW_ERR;
1515e705c121SKalle Valo 	}
1516e705c121SKalle Valo 
1517e705c121SKalle Valo 	/* uCode wakes up after power-down sleep */
1518e705c121SKalle Valo 	if (inta & CSR_INT_BIT_WAKEUP) {
1519e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1520e705c121SKalle Valo 		iwl_pcie_rxq_check_wrptr(trans);
1521e705c121SKalle Valo 		iwl_pcie_txq_check_wrptrs(trans);
1522e705c121SKalle Valo 
1523e705c121SKalle Valo 		isr_stats->wakeup++;
1524e705c121SKalle Valo 
1525e705c121SKalle Valo 		handled |= CSR_INT_BIT_WAKEUP;
1526e705c121SKalle Valo 	}
1527e705c121SKalle Valo 
1528e705c121SKalle Valo 	/* All uCode command responses, including Tx command responses,
1529e705c121SKalle Valo 	 * Rx "responses" (frame-received notification), and other
1530e705c121SKalle Valo 	 * notifications from uCode come through here. */
1531e705c121SKalle Valo 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1532e705c121SKalle Valo 		    CSR_INT_BIT_RX_PERIODIC)) {
1533e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1534e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1535e705c121SKalle Valo 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1536e705c121SKalle Valo 			iwl_write32(trans, CSR_FH_INT_STATUS,
1537e705c121SKalle Valo 					CSR_FH_INT_RX_MASK);
1538e705c121SKalle Valo 		}
1539e705c121SKalle Valo 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1540e705c121SKalle Valo 			handled |= CSR_INT_BIT_RX_PERIODIC;
1541e705c121SKalle Valo 			iwl_write32(trans,
1542e705c121SKalle Valo 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1543e705c121SKalle Valo 		}
1544e705c121SKalle Valo 		/* Sending an RX interrupt requires several steps to be done
1545e705c121SKalle Valo 		 * in the device:
1546e705c121SKalle Valo 		 * 1- write interrupt to current index in ICT table.
1547e705c121SKalle Valo 		 * 2- dma RX frame.
1548e705c121SKalle Valo 		 * 3- update RX shared data to indicate last write index.
1549e705c121SKalle Valo 		 * 4- send interrupt.
1550e705c121SKalle Valo 		 * This could lead to an RX race: the driver could receive the
1551e705c121SKalle Valo 		 * RX interrupt before the shared data reflects the change;
1552e705c121SKalle Valo 		 * the periodic interrupt will detect any dangling Rx activity.
1553e705c121SKalle Valo 		 */
1554e705c121SKalle Valo 
1555e705c121SKalle Valo 		/* Disable periodic interrupt; we use it as just a one-shot. */
1556e705c121SKalle Valo 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
1557e705c121SKalle Valo 			    CSR_INT_PERIODIC_DIS);
1558e705c121SKalle Valo 
1559e705c121SKalle Valo 		/*
1560e705c121SKalle Valo 		 * Enable periodic interrupt in 8 msec only if we received
1561e705c121SKalle Valo 		 * real RX interrupt (instead of just periodic int), to catch
1562e705c121SKalle Valo 		 * any dangling Rx interrupt.  If it was just the periodic
1563e705c121SKalle Valo 		 * interrupt, there was no dangling Rx activity, and no need
1564e705c121SKalle Valo 		 * to extend the periodic interrupt; one-shot is enough.
1565e705c121SKalle Valo 		 */
1566e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1567e705c121SKalle Valo 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
1568e705c121SKalle Valo 				   CSR_INT_PERIODIC_ENA);
1569e705c121SKalle Valo 
1570e705c121SKalle Valo 		isr_stats->rx++;
1571e705c121SKalle Valo 
1572e705c121SKalle Valo 		local_bh_disable();
1573e705c121SKalle Valo 		iwl_pcie_rx_handle(trans);
1574e705c121SKalle Valo 		local_bh_enable();
1575e705c121SKalle Valo 	}
1576e705c121SKalle Valo 
1577e705c121SKalle Valo 	/* This "Tx" DMA channel is used only for loading uCode */
1578e705c121SKalle Valo 	if (inta & CSR_INT_BIT_FH_TX) {
1579e705c121SKalle Valo 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1580e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1581e705c121SKalle Valo 		isr_stats->tx++;
1582e705c121SKalle Valo 		handled |= CSR_INT_BIT_FH_TX;
1583e705c121SKalle Valo 		/* Wake up uCode load routine, now that load is complete */
1584e705c121SKalle Valo 		trans_pcie->ucode_write_complete = true;
1585e705c121SKalle Valo 		wake_up(&trans_pcie->ucode_write_waitq);
1586e705c121SKalle Valo 	}
1587e705c121SKalle Valo 
1588e705c121SKalle Valo 	if (inta & ~handled) {
1589e705c121SKalle Valo 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1590e705c121SKalle Valo 		isr_stats->unhandled++;
1591e705c121SKalle Valo 	}
1592e705c121SKalle Valo 
1593e705c121SKalle Valo 	if (inta & ~(trans_pcie->inta_mask)) {
1594e705c121SKalle Valo 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1595e705c121SKalle Valo 			 inta & ~trans_pcie->inta_mask);
1596e705c121SKalle Valo 	}
1597e705c121SKalle Valo 
1598e705c121SKalle Valo 	/* Re-enable all interrupts */
1599e705c121SKalle Valo 	/* only Re-enable if disabled by irq */
1600e705c121SKalle Valo 	if (test_bit(STATUS_INT_ENABLED, &trans->status))
1601e705c121SKalle Valo 		iwl_enable_interrupts(trans);
1602e705c121SKalle Valo 	/* Re-enable RF_KILL if it occurred */
1603e705c121SKalle Valo 	else if (handled & CSR_INT_BIT_RF_KILL)
1604e705c121SKalle Valo 		iwl_enable_rfkill_int(trans);
1605e705c121SKalle Valo 
1606e705c121SKalle Valo out:
1607e705c121SKalle Valo 	lock_map_release(&trans->sync_cmd_lockdep_map);
1608e705c121SKalle Valo 	return IRQ_HANDLED;
1609e705c121SKalle Valo }
1610e705c121SKalle Valo 
1611e705c121SKalle Valo /******************************************************************************
1612e705c121SKalle Valo  *
1613e705c121SKalle Valo  * ICT functions
1614e705c121SKalle Valo  *
1615e705c121SKalle Valo  ******************************************************************************/
1616e705c121SKalle Valo 
1617e705c121SKalle Valo /* Free dram table */
1618e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans)
1619e705c121SKalle Valo {
1620e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1621e705c121SKalle Valo 
1622e705c121SKalle Valo 	if (trans_pcie->ict_tbl) {
1623e705c121SKalle Valo 		dma_free_coherent(trans->dev, ICT_SIZE,
1624e705c121SKalle Valo 				  trans_pcie->ict_tbl,
1625e705c121SKalle Valo 				  trans_pcie->ict_tbl_dma);
1626e705c121SKalle Valo 		trans_pcie->ict_tbl = NULL;
1627e705c121SKalle Valo 		trans_pcie->ict_tbl_dma = 0;
1628e705c121SKalle Valo 	}
1629e705c121SKalle Valo }
1630e705c121SKalle Valo 
1631e705c121SKalle Valo /*
1632e705c121SKalle Valo  * Allocate the DRAM-shared ICT table as an aligned memory block of
1633e705c121SKalle Valo  * ICT_SIZE.
1634e705c121SKalle Valo  * Also reset all data related to ICT-table interrupts.
1635e705c121SKalle Valo  */
1636e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1637e705c121SKalle Valo {
1638e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1639e705c121SKalle Valo 
1640e705c121SKalle Valo 	trans_pcie->ict_tbl =
1641e705c121SKalle Valo 		dma_zalloc_coherent(trans->dev, ICT_SIZE,
1642e705c121SKalle Valo 				   &trans_pcie->ict_tbl_dma,
1643e705c121SKalle Valo 				   GFP_KERNEL);
1644e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1645e705c121SKalle Valo 		return -ENOMEM;
1646e705c121SKalle Valo 
1647e705c121SKalle Valo 	/* just an API sanity check ... it is guaranteed to be aligned */
1648e705c121SKalle Valo 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1649e705c121SKalle Valo 		iwl_pcie_free_ict(trans);
1650e705c121SKalle Valo 		return -EINVAL;
1651e705c121SKalle Valo 	}
1652e705c121SKalle Valo 
1653e705c121SKalle Valo 	return 0;
1654e705c121SKalle Valo }
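/*
 * Editorial note (hedged): the alignment WARN_ON above is cheap to
 * satisfy because ICT_SIZE is a single 4096-byte page and coherent DMA
 * allocations of at least a page are page-aligned in practice, so the
 * low ICT_SHIFT bits of ict_tbl_dma are zero and the base address
 * survives the ">> ICT_SHIFT" truncation programmed into
 * CSR_DRAM_INT_TBL_REG by iwl_pcie_reset_ict() below.
 */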
1655e705c121SKalle Valo 
1656e705c121SKalle Valo /* The device is going up: inform it that we are using the ICT
1657e705c121SKalle Valo  * interrupt table, and tell the driver to start using ICT interrupts.
1658e705c121SKalle Valo  */
1659e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans)
1660e705c121SKalle Valo {
1661e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1662e705c121SKalle Valo 	u32 val;
1663e705c121SKalle Valo 
1664e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1665e705c121SKalle Valo 		return;
1666e705c121SKalle Valo 
1667e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1668e705c121SKalle Valo 	iwl_disable_interrupts(trans);
1669e705c121SKalle Valo 
1670e705c121SKalle Valo 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1671e705c121SKalle Valo 
1672e705c121SKalle Valo 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
1673e705c121SKalle Valo 
1674e705c121SKalle Valo 	val |= CSR_DRAM_INT_TBL_ENABLE |
1675e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
1676e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
1677e705c121SKalle Valo 
1678e705c121SKalle Valo 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);
1679e705c121SKalle Valo 
1680e705c121SKalle Valo 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1681e705c121SKalle Valo 	trans_pcie->use_ict = true;
1682e705c121SKalle Valo 	trans_pcie->ict_index = 0;
1683e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1684e705c121SKalle Valo 	iwl_enable_interrupts(trans);
1685e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1686e705c121SKalle Valo }
1687e705c121SKalle Valo 
1688e705c121SKalle Valo /* The device is going down: disable ICT interrupt usage */
1689e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans)
1690e705c121SKalle Valo {
1691e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1692e705c121SKalle Valo 
1693e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1694e705c121SKalle Valo 	trans_pcie->use_ict = false;
1695e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1696e705c121SKalle Valo }
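/*
 * Editorial note: clearing use_ict under irq_lock means the next run of
 * iwl_pcie_irq_handler() takes the iwl_pcie_int_cause_non_ict() path,
 * i.e. it reads interrupt causes directly from the CSR_INT register.
 */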
1697e705c121SKalle Valo 
1698e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data)
1699e705c121SKalle Valo {
1700e705c121SKalle Valo 	struct iwl_trans *trans = data;
1701e705c121SKalle Valo 
1702e705c121SKalle Valo 	if (!trans)
1703e705c121SKalle Valo 		return IRQ_NONE;
1704e705c121SKalle Valo 
1705e705c121SKalle Valo 	/* Disable (but don't clear!) interrupts here to avoid
1706e705c121SKalle Valo 	 * back-to-back ISRs and sporadic interrupts from our NIC.
1707e705c121SKalle Valo 	 * If we have something to service, the irq thread will re-enable ints.
1708e705c121SKalle Valo 	 * If we *don't* have something, we'll re-enable before leaving here.
1709e705c121SKalle Valo 	 */
1710e705c121SKalle Valo 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1711e705c121SKalle Valo 
1712e705c121SKalle Valo 	return IRQ_WAKE_THREAD;
1713e705c121SKalle Valo }
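/*
 * Usage sketch (editorial; the registration actually lives elsewhere in
 * the transport code): the two handlers above pair up as a threaded
 * interrupt, with iwl_pcie_isr() as the hard-irq half and
 * iwl_pcie_irq_handler() as the thread:
 *
 *	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 *				   iwl_pcie_irq_handler,
 *				   IRQF_SHARED, DRV_NAME, trans);
 */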