/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of missing RBDs per allocation request (a request is posted
 *   with 2 empty RBDs; there is no guarantee when the other 6 RBDs are
 *   supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
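/*
 * Illustrative example (not from the hardware spec): with queue_size = 256,
 * read = 0 and write = 255 gives (0 - 255 - 1) & 255 = 0 free slots, i.e.
 * the ring is completely full; read == write gives 255 free slots - one
 * entry is always sacrificed to distinguish full from empty.
 */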
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
        /* Make sure rx queue size is a power of 2 */
        WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

        /*
         * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
         * between empty and completely full queues.
         * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
         * defined for negative dividends.
         */
        return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
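/*
 * Example (illustrative): a 256-byte-aligned DMA address 0xABCD1200 is
 * encoded as 0x00ABCD12 - the device re-appends the low 8 zero bits, so
 * receive buffers must be at least 256-byte aligned (page-backed RBs are).
 */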
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        if (trans->cfg->mq_rx_supported) {
                iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
                return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
                                         RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
        } else {
                iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
                return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                           FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
                                           1000);
        }
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
                                    struct iwl_rxq *rxq)
{
        u32 reg;

        lockdep_assert_held(&rxq->lock);

        /*
         * explicitly wake up the NIC if:
         * 1. shadow registers aren't enabled
         * 2. there is a chance that the NIC is asleep
         */
        if (!trans->cfg->base_params->shadow_reg_enable &&
            test_bit(STATUS_TPOWER_PMI, &trans->status)) {
                reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                       reg);
                        iwl_set_bit(trans, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        rxq->need_update = true;
                        return;
                }
        }

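        /*
         * The device only ever sees write pointers that are multiples of 8,
         * e.g. an internal write index of 13 is reported as 8 (illustrative).
         */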
        rxq->write_actual = round_down(rxq->write, 8);
        if (trans->cfg->mq_rx_supported)
                iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
                            rxq->write_actual);
        else
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                if (!rxq->need_update)
                        continue;
                spin_lock(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                rxq->need_update = false;
                spin_unlock(&rxq->lock);
        }
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_rx_mem_buffer *rxb;

        /*
         * If the device isn't enabled - no need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
         * pending. We stop the APM before we sync the interrupts because we
         * have to (see comment there). On the other hand, since the APM is
         * stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;

        spin_lock(&rxq->lock);
        while (rxq->free_count) {
                __le64 *bd = (__le64 *)rxq->bd;

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                rxb->invalid = false;
                /* first 12 bits are expected to be empty */
                WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
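                /*
                 * A 64-bit free-table entry packs the 4k-aligned page DMA
                 * address with the buffer's vid tag in the low 12 bits; the
                 * HW hands the vid back via the used-BD ring so the driver
                 * can find the matching rxb (hence the alignment check above).
                 */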
                /* Point to Rx buffer via next RBD in circular buffer */
                bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
                rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);

        /*
         * If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8.
         */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock(&rxq->lock);
        }
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_rx_mem_buffer *rxb;

        /*
         * If the device isn't enabled - no need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
         * pending. We stop the APM before we sync the interrupts because we
         * have to (see comment there). On the other hand, since the APM is
         * stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;

        spin_lock(&rxq->lock);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                __le32 *bd = (__le32 *)rxq->bd;
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                rxb->invalid = false;

                /* Point to Rx buffer via next RBD in circular buffer */
                bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock(&rxq->lock);
        }
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        if (trans->cfg->mq_rx_supported)
                iwl_pcie_rxmq_restock(trans, rxq);
        else
                iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
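/*
 * Note: the gfp priority comes from the caller - typically GFP_KERNEL from
 * process context (e.g. the background allocator) and GFP_ATOMIC from atomic
 * context such as the interrupt path.
 */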
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
                                           gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct page *page;
        gfp_t gfp_mask = priority;

        if (trans_pcie->rx_page_order > 0)
                gfp_mask |= __GFP_COMP;

        /* Alloc a new receive buffer */
        page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
        if (!page) {
                if (net_ratelimit())
                        IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
                                       trans_pcie->rx_page_order);
                /*
                 * Issue an error if we don't have enough pre-allocated
                 * buffers.
                 */
                if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
                        IWL_CRIT(trans,
                                 "Failed to alloc_pages\n");
                return NULL;
        }
        return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                                   struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;

        while (1) {
                spin_lock(&rxq->lock);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock(&rxq->lock);
                        return;
                }
                spin_unlock(&rxq->lock);

                /* Alloc a new receive buffer */
                page = iwl_pcie_rx_alloc_page(trans, priority);
                if (!page)
                        return;

                spin_lock(&rxq->lock);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock(&rxq->lock);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
                        spin_lock(&rxq->lock);
                        list_add(&rxb->list, &rxq->rx_used);
                        spin_unlock(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }

                spin_lock(&rxq->lock);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock(&rxq->lock);
        }
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        for (i = 0; i < RX_POOL_SIZE; i++) {
                if (!trans_pcie->rx_pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
                               PAGE_SIZE << trans_pcie->rx_page_order,
                               DMA_FROM_DEVICE);
                __free_pages(trans_pcie->rx_pool[i].page,
                             trans_pcie->rx_page_order);
                trans_pcie->rx_pool[i].page = NULL;
        }
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received allocation request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct list_head local_empty;
        int pending = atomic_xchg(&rba->req_pending, 0);

        IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

        /* If we were scheduled - there is at least one request */
        spin_lock(&rba->lock);
        /* swap out the rba->rbd_empty to a local list */
        list_replace_init(&rba->rbd_empty, &local_empty);
        spin_unlock(&rba->lock);
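
        /*
         * Each pending request corresponds to one batch of RX_CLAIM_REQ_ALLOC
         * (8) pages; req_ready is bumped once per completed batch below, so
         * the queues can claim whole batches via iwl_pcie_rx_allocator_get().
         */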

        while (pending) {
                int i;
                struct list_head local_allocated;
                gfp_t gfp_mask = GFP_KERNEL;

                /* Do not post a warning if there are only a few requests */
                if (pending < RX_PENDING_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                INIT_LIST_HEAD(&local_allocated);

                for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
                        struct iwl_rx_mem_buffer *rxb;
                        struct page *page;

                        /* List should never be empty - each reused RBD is
                         * returned to the list, and initial pool covers any
                         * possible gap between the time the page is allocated
                         * to the time the RBD is added.
                         */
                        BUG_ON(list_empty(&local_empty));
                        /* Get the first rxb from the rbd list */
                        rxb = list_first_entry(&local_empty,
                                               struct iwl_rx_mem_buffer, list);
                        BUG_ON(rxb->page);

                        /* Alloc a new receive buffer */
                        page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
                        if (!page)
                                continue;
                        rxb->page = page;

                        /* Get physical address of the RB */
                        rxb->page_dma = dma_map_page(trans->dev, page, 0,
                                        PAGE_SIZE << trans_pcie->rx_page_order,
                                        DMA_FROM_DEVICE);
                        if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                                rxb->page = NULL;
                                __free_pages(page, trans_pcie->rx_page_order);
                                continue;
                        }

                        /* move the allocated entry to the out list */
                        list_move(&rxb->list, &local_allocated);
                        i++;
                }

                pending--;
                if (!pending) {
                        pending = atomic_xchg(&rba->req_pending, 0);
                        IWL_DEBUG_RX(trans,
                                     "Pending allocation requests = %d\n",
                                     pending);
                }

                spin_lock(&rba->lock);
                /* add the allocated rbds to the allocator allocated list */
                list_splice_tail(&local_allocated, &rba->rbd_allocated);
                /* get more empty RBDs for current pending requests */
                list_splice_tail_init(&rba->rbd_empty, &local_empty);
                spin_unlock(&rba->lock);

                atomic_inc(&rba->req_ready);
        }

        spin_lock(&rba->lock);
        /* return unused rbds to the allocator empty list */
        list_splice_tail(&local_empty, &rba->rbd_empty);
        spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
                                      struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i;

        lockdep_assert_held(&rxq->lock);

        /*
         * atomic_dec_if_positive returns req_ready - 1 for any scenario.
         * If req_ready is 0 atomic_dec_if_positive will return -1 and this
         * function will return early, as there are no ready requests.
         * atomic_dec_if_positive will perform the *actual* decrement only if
         * req_ready > 0, i.e. - there are ready requests and the function
         * hands one request to the caller.
         */
        if (atomic_dec_if_positive(&rba->req_ready) < 0)
                return;

        spin_lock(&rba->lock);
        for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
                /* Get next free Rx buffer, remove it from free list */
                struct iwl_rx_mem_buffer *rxb =
                        list_first_entry(&rba->rbd_allocated,
                                         struct iwl_rx_mem_buffer, list);

                list_move(&rxb->list, &rxq->rx_free);
        }
        spin_unlock(&rba->lock);

        rxq->used_count -= RX_CLAIM_REQ_ALLOC;
        rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
        struct iwl_rb_allocator *rba_p =
                container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
                container_of(rba_p, struct iwl_trans_pcie, rba);

        iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
        int i;
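        /*
         * Free-table entries are 64 bits wide on multi-queue devices (DMA
         * address plus vid tag) and 32 bits (address >> 8) on single-queue
         * hardware - see the two restock implementations above.
         */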
        int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
                                                      sizeof(__le32);

        if (WARN_ON(trans_pcie->rxq))
                return -EINVAL;

        trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
                                  GFP_KERNEL);
        if (!trans_pcie->rxq)
                return -EINVAL;

        spin_lock_init(&rba->lock);

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                spin_lock_init(&rxq->lock);
                if (trans->cfg->mq_rx_supported)
                        rxq->queue_size = MQ_RX_TABLE_SIZE;
                else
                        rxq->queue_size = RX_QUEUE_SIZE;

                /*
                 * Allocate the circular buffer of Read Buffer Descriptors
                 * (RBDs)
                 */
                rxq->bd = dma_zalloc_coherent(dev,
                                              free_size * rxq->queue_size,
                                              &rxq->bd_dma, GFP_KERNEL);
                if (!rxq->bd)
                        goto err;

                if (trans->cfg->mq_rx_supported) {
                        rxq->used_bd = dma_zalloc_coherent(dev,
                                                           sizeof(__le32) *
                                                           rxq->queue_size,
                                                           &rxq->used_bd_dma,
                                                           GFP_KERNEL);
                        if (!rxq->used_bd)
                                goto err;
                }

                /* Allocate the driver's pointer to receive buffer status */
                rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
                                                   &rxq->rb_stts_dma,
                                                   GFP_KERNEL);
                if (!rxq->rb_stts)
                        goto err;
        }
        return 0;

err:
        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                if (rxq->bd)
                        dma_free_coherent(dev, free_size * rxq->queue_size,
                                          rxq->bd, rxq->bd_dma);
                rxq->bd_dma = 0;
                rxq->bd = NULL;

                if (rxq->rb_stts)
                        dma_free_coherent(trans->dev,
                                          sizeof(struct iwl_rb_status),
                                          rxq->rb_stts, rxq->rb_stts_dma);

                if (rxq->used_bd)
                        dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
                                          rxq->used_bd, rxq->used_bd_dma);
                rxq->used_bd_dma = 0;
                rxq->used_bd = NULL;
        }
        kfree(trans_pcie->rxq);

        return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        unsigned long flags;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        switch (trans_pcie->rx_buf_size) {
        case IWL_AMSDU_4K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
                break;
        case IWL_AMSDU_8K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
                break;
        case IWL_AMSDU_12K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
                break;
        default:
                WARN_ON(1);
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
        }

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return;

        /* Stop Rx DMA */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        /* reset and flush pointers */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

        /* Reset driver's Rx queue write index */
        iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                    (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                    rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                    rb_size |
                    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        iwl_trans_release_nic_access(trans, &flags);

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (trans->cfg->host_interrupt_operation_mode)
                iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
        /*
         * Turn on the chicken-bits that cause MAC wakeup for RX-related
         * values.
         * This costs some power, but is needed as a W/A for a 9000 integrated
         * A-step bug where shadow registers are not in the retention list and
         * their value is lost when the NIC powers down.
         */
        if (trans->cfg->integrated) {
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
                            CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
                            CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
        }
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size, enabled = 0;
        unsigned long flags;
        int i;

        switch (trans_pcie->rx_buf_size) {
        case IWL_AMSDU_4K:
                rb_size = RFH_RXF_DMA_RB_SIZE_4K;
                break;
        case IWL_AMSDU_8K:
                rb_size = RFH_RXF_DMA_RB_SIZE_8K;
                break;
        case IWL_AMSDU_12K:
                rb_size = RFH_RXF_DMA_RB_SIZE_12K;
                break;
        default:
                WARN_ON(1);
                rb_size = RFH_RXF_DMA_RB_SIZE_4K;
        }

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return;

        /* Stop Rx DMA */
        iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
        /* disable free and used rx queue operation */
        iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

        for (i = 0; i < trans->num_rx_queues; i++) {
                /* Tell device where to find RBD free table in DRAM */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_FRBDCB_BA_LSB(i),
                                         trans_pcie->rxq[i].bd_dma);
                /* Tell device where to find RBD used table in DRAM */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_URBDCB_BA_LSB(i),
                                         trans_pcie->rxq[i].used_bd_dma);
                /* Tell device where in DRAM to update its Rx status */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_URBD_STTS_WPTR_LSB(i),
                                         trans_pcie->rxq[i].rb_stts_dma);
                /* Reset device index tables */
                iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
                iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
                iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

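                /*
                 * Judging by the paired bits, the low 16 bits of
                 * RFH_RXF_RXQ_ACTIVE appear to enable the free-BD side of
                 * each queue and the upper 16 bits the used-BD side.
                 */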
                enabled |= BIT(i) | BIT(i + 16);
        }

        /*
         * Enable Rx DMA
         * Rx buffer size 4 or 8k or 12k
         * Min RB size 4 or 8
         * Drop frames that exceed RB size
         * 512 RBDs
         */
        iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
                               RFH_DMA_EN_ENABLE_VAL | rb_size |
                               RFH_RXF_DMA_MIN_RB_4_8 |
                               RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
                               RFH_RXF_DMA_RBDCB_SIZE_512);

        /*
         * Activate DMA snooping.
         * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
         * Default queue is 0
         */
        iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
                               (DEFAULT_RXQ_NUM <<
                                RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
                               RFH_GEN_CFG_SERVICE_DMA_SNOOP |
                               (trans->cfg->integrated ?
                                RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
                                RFH_GEN_CFG_RB_CHUNK_SIZE_128) <<
                               RFH_GEN_CFG_RB_CHUNK_SIZE_POS);
        /* Enable the relevant rx queues */
        iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

        iwl_trans_release_nic_access(trans, &flags);

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

        iwl_pcie_enable_rx_wake(trans, true);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
        lockdep_assert_held(&rxq->lock);

        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
        rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
        WARN_ON(1);
        return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *def_rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err, queue_size, allocator_pool_size, num_alloc;

        if (!trans_pcie->rxq) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }
        def_rxq = trans_pcie->rxq;
        if (!rba->alloc_wq)
                rba->alloc_wq = alloc_workqueue("rb_allocator",
                                                WQ_HIGHPRI | WQ_UNBOUND, 1);
        INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

        spin_lock(&rba->lock);
        atomic_set(&rba->req_pending, 0);
        atomic_set(&rba->req_ready, 0);
        INIT_LIST_HEAD(&rba->rbd_allocated);
        INIT_LIST_HEAD(&rba->rbd_empty);
        spin_unlock(&rba->lock);

        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_free_rbs_pool(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                def_rxq->queue[i] = NULL;

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                rxq->id = i;

                spin_lock(&rxq->lock);
                /*
                 * Set read write pointer to reflect that we have processed
                 * and used all buffers, but have not restocked the Rx queue
                 * with fresh buffers
                 */
                rxq->read = 0;
                rxq->write = 0;
                rxq->write_actual = 0;
                memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

                iwl_pcie_rx_init_rxb_lists(rxq);

                if (!rxq->napi.poll)
                        netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
                                       iwl_pcie_dummy_napi_poll, 64);

                spin_unlock(&rxq->lock);
        }

        /* move the pool to the default queue and allocator ownerships */
        queue_size = trans->cfg->mq_rx_supported ?
                     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
        num_alloc = queue_size + allocator_pool_size;
        BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
                     ARRAY_SIZE(trans_pcie->rx_pool));
        for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

                if (i < allocator_pool_size)
                        list_add(&rxb->list, &rba->rbd_empty);
                else
                        list_add(&rxb->list, &def_rxq->rx_used);
                trans_pcie->global_table[i] = rxb;
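                /*
                 * vid 0 is never handed out, so a zeroed used-BD entry can
                 * never be mistaken for a real buffer; hence i + 1 here, and
                 * the receive path can look the buffer up by vid - 1.
                 */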
                rxb->vid = (u16)(i + 1);
                rxb->invalid = true;
        }

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

        if (trans->cfg->mq_rx_supported)
                iwl_pcie_rx_mq_hw_init(trans);
        else
                iwl_pcie_rx_hw_init(trans, def_rxq);

        iwl_pcie_rxq_restock(trans, def_rxq);

        spin_lock(&def_rxq->lock);
        iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
        spin_unlock(&def_rxq->lock);

        return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
                                                      sizeof(__le32);
        int i;

        /*
         * if rxq is NULL, it means that nothing has been allocated,
         * exit now
         */
        if (!trans_pcie->rxq) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        cancel_work_sync(&rba->rx_alloc);
        if (rba->alloc_wq) {
                destroy_workqueue(rba->alloc_wq);
                rba->alloc_wq = NULL;
        }

        iwl_pcie_free_rbs_pool(trans);

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                if (rxq->bd)
                        dma_free_coherent(trans->dev,
                                          free_size * rxq->queue_size,
                                          rxq->bd, rxq->bd_dma);
                rxq->bd_dma = 0;
                rxq->bd = NULL;

                if (rxq->rb_stts)
                        dma_free_coherent(trans->dev,
                                          sizeof(struct iwl_rb_status),
                                          rxq->rb_stts, rxq->rb_stts_dma);
                else
                        IWL_DEBUG_INFO(trans,
                                       "Free rxq->rb_stts which is NULL\n");

                if (rxq->used_bd)
                        dma_free_coherent(trans->dev,
                                          sizeof(__le32) * rxq->queue_size,
                                          rxq->used_bd, rxq->used_bd_dma);
                rxq->used_bd_dma = 0;
                rxq->used_bd = NULL;

                if (rxq->napi.poll)
                        netif_napi_del(&rxq->napi);
        }
        kfree(trans_pcie->rxq);
}
1032e705c121SKalle Valo 
1033e705c121SKalle Valo /*
1034e705c121SKalle Valo  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1035e705c121SKalle Valo  *
1036e705c121SKalle Valo  * Called when a RBD can be reused. The RBD is transferred to the allocator.
1037e705c121SKalle Valo  * When there are 2 empty RBDs - a request for allocation is posted
1038e705c121SKalle Valo  */
1039e705c121SKalle Valo static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1040e705c121SKalle Valo 				  struct iwl_rx_mem_buffer *rxb,
1041e705c121SKalle Valo 				  struct iwl_rxq *rxq, bool emergency)
1042e705c121SKalle Valo {
1043e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1044e705c121SKalle Valo 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1045e705c121SKalle Valo 
1046e705c121SKalle Valo 	/* Move the RBD to the used list, will be moved to allocator in batches
1047e705c121SKalle Valo 	 * before claiming or posting a request*/
1048e705c121SKalle Valo 	list_add_tail(&rxb->list, &rxq->rx_used);
1049e705c121SKalle Valo 
1050e705c121SKalle Valo 	if (unlikely(emergency))
1051e705c121SKalle Valo 		return;
1052e705c121SKalle Valo 
1053e705c121SKalle Valo 	/* Count the allocator owned RBDs */
1054e705c121SKalle Valo 	rxq->used_count++;
1055e705c121SKalle Valo 
1056e705c121SKalle Valo 	/* If we have RX_POST_REQ_ALLOC newly released rx buffers,
1057e705c121SKalle Valo 	 * issue a request to the allocator. The modulo on RX_CLAIM_REQ_ALLOC
1058e705c121SKalle Valo 	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers
1059e705c121SKalle Valo 	 * earlier but still need to post another request.
1060e705c121SKalle Valo 	 */
1061e705c121SKalle Valo 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1062e705c121SKalle Valo 		/* Move the 2 RBDs to allocator ownership. The allocator has
1063e705c121SKalle Valo 		 * another 6 from its pool for the request completion. */
1064e705c121SKalle Valo 		spin_lock(&rba->lock);
1065e705c121SKalle Valo 		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1066e705c121SKalle Valo 		spin_unlock(&rba->lock);
1067e705c121SKalle Valo 
1068e705c121SKalle Valo 		atomic_inc(&rba->req_pending);
1069e705c121SKalle Valo 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1070e705c121SKalle Valo 	}
1071e705c121SKalle Valo }
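
/*
 * Illustrative sketch (not driver code): assuming the values implied by the
 * comments above, RX_POST_REQ_ALLOC == 2 and RX_CLAIM_REQ_ALLOC == 8, the
 * condition (used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC holds at
 * used_count == 2, 10, 18, ...:
 *
 *	for (used_count = 1; used_count <= 20; used_count++)
 *		if ((used_count % 8) == 2)
 *			pr_info("post request at used_count=%d\n", used_count);
 *
 * i.e. one allocation request is posted per batch of 8 recycled RBDs, two
 * RBDs into each batch, even if an earlier claim of 8 buffers failed.
 */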
1072e705c121SKalle Valo 
1073e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
107478485054SSara Sharon 				struct iwl_rxq *rxq,
1075e705c121SKalle Valo 				struct iwl_rx_mem_buffer *rxb,
1076e705c121SKalle Valo 				bool emergency)
1077e705c121SKalle Valo {
1078e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1079e705c121SKalle Valo 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1080e705c121SKalle Valo 	bool page_stolen = false;
1081e705c121SKalle Valo 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1082e705c121SKalle Valo 	u32 offset = 0;
1083e705c121SKalle Valo 
1084e705c121SKalle Valo 	if (WARN_ON(!rxb))
1085e705c121SKalle Valo 		return;
1086e705c121SKalle Valo 
1087e705c121SKalle Valo 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1088e705c121SKalle Valo 
1089e705c121SKalle Valo 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1090e705c121SKalle Valo 		struct iwl_rx_packet *pkt;
1091e705c121SKalle Valo 		u16 sequence;
1092e705c121SKalle Valo 		bool reclaim;
1093e705c121SKalle Valo 		int index, cmd_index, len;
1094e705c121SKalle Valo 		struct iwl_rx_cmd_buffer rxcb = {
1095e705c121SKalle Valo 			._offset = offset,
1096e705c121SKalle Valo 			._rx_page_order = trans_pcie->rx_page_order,
1097e705c121SKalle Valo 			._page = rxb->page,
1098e705c121SKalle Valo 			._page_stolen = false,
1099e705c121SKalle Valo 			.truesize = max_len,
1100e705c121SKalle Valo 		};
1101e705c121SKalle Valo 
1102e705c121SKalle Valo 		pkt = rxb_addr(&rxcb);
1103e705c121SKalle Valo 
1104e705c121SKalle Valo 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
1105e705c121SKalle Valo 			break;
1106e705c121SKalle Valo 
1107ab2e696bSSara Sharon 		WARN_ON((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1108ab2e696bSSara Sharon 			FH_RSCSR_RXQ_POS != rxq->id);
1109ab2e696bSSara Sharon 
1110e705c121SKalle Valo 		IWL_DEBUG_RX(trans,
1111e705c121SKalle Valo 			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
1112e705c121SKalle Valo 			     rxcb._offset,
111339bdb17eSSharon Dvir 			     iwl_get_cmd_string(trans,
111439bdb17eSSharon Dvir 						iwl_cmd_id(pkt->hdr.cmd,
111539bdb17eSSharon Dvir 							   pkt->hdr.group_id,
111639bdb17eSSharon Dvir 							   0)),
1117e705c121SKalle Valo 			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
1118e705c121SKalle Valo 
1119e705c121SKalle Valo 		len = iwl_rx_packet_len(pkt);
1120e705c121SKalle Valo 		len += sizeof(u32); /* account for status word */
1121e705c121SKalle Valo 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1122e705c121SKalle Valo 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1123e705c121SKalle Valo 
1124e705c121SKalle Valo 		/* Reclaim a command buffer only if this packet is a response
1125e705c121SKalle Valo 		 *   to a (driver-originated) command.
1126e705c121SKalle Valo 		 * If the packet (e.g. Rx frame) originated from uCode,
1127e705c121SKalle Valo 		 *   there is no command buffer to reclaim.
1128e705c121SKalle Valo 		 * uCode should set the SEQ_RX_FRAME bit if ucode-originated,
1129e705c121SKalle Valo 		 *   but apparently a few packets don't get it set; catch them here. */
1130e705c121SKalle Valo 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1131e705c121SKalle Valo 		if (reclaim) {
1132e705c121SKalle Valo 			int i;
1133e705c121SKalle Valo 
1134e705c121SKalle Valo 			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1135e705c121SKalle Valo 				if (trans_pcie->no_reclaim_cmds[i] ==
1136e705c121SKalle Valo 							pkt->hdr.cmd) {
1137e705c121SKalle Valo 					reclaim = false;
1138e705c121SKalle Valo 					break;
1139e705c121SKalle Valo 				}
1140e705c121SKalle Valo 			}
1141e705c121SKalle Valo 		}
1142e705c121SKalle Valo 
1143e705c121SKalle Valo 		sequence = le16_to_cpu(pkt->hdr.sequence);
1144e705c121SKalle Valo 		index = SEQ_TO_INDEX(sequence);
1145e705c121SKalle Valo 		cmd_index = get_cmd_index(&txq->q, index);
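		/*
		 * Illustrative mapping (values hypothetical): the firmware
		 * echoes the sequence number of the originating host command,
		 * so e.g. sequence 0x0123 selects entry 0x23 once masked down
		 * by SEQ_TO_INDEX() and wrapped into the queue window by
		 * get_cmd_index().
		 */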
1146e705c121SKalle Valo 
1147bce97731SSara Sharon 		if (rxq->id == 0)
1148bce97731SSara Sharon 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1149bce97731SSara Sharon 				       &rxcb);
1150bce97731SSara Sharon 		else
1151bce97731SSara Sharon 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1152bce97731SSara Sharon 					   &rxcb, rxq->id);
1153e705c121SKalle Valo 
1154e705c121SKalle Valo 		if (reclaim) {
1155e705c121SKalle Valo 			kzfree(txq->entries[cmd_index].free_buf);
1156e705c121SKalle Valo 			txq->entries[cmd_index].free_buf = NULL;
1157e705c121SKalle Valo 		}
1158e705c121SKalle Valo 
1159e705c121SKalle Valo 		/*
1160e705c121SKalle Valo 		 * After here, we should always check rxcb._page_stolen,
1161e705c121SKalle Valo 		 * if it is true then one of the handlers took the page.
1162e705c121SKalle Valo 		 */
1163e705c121SKalle Valo 
1164e705c121SKalle Valo 		if (reclaim) {
1165e705c121SKalle Valo 			/* Invoke any callbacks, transfer the buffer to caller,
1166e705c121SKalle Valo 			 * and fire off the (possibly) blocking
1167e705c121SKalle Valo 			 * iwl_trans_send_cmd()
1168e705c121SKalle Valo 			 * as we reclaim the driver command queue */
1169e705c121SKalle Valo 			if (!rxcb._page_stolen)
1170e705c121SKalle Valo 				iwl_pcie_hcmd_complete(trans, &rxcb);
1171e705c121SKalle Valo 			else
1172e705c121SKalle Valo 				IWL_WARN(trans, "Claim null rxb?\n");
1173e705c121SKalle Valo 		}
1174e705c121SKalle Valo 
1175e705c121SKalle Valo 		page_stolen |= rxcb._page_stolen;
1176e705c121SKalle Valo 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1177e705c121SKalle Valo 	}
1178e705c121SKalle Valo 
1179e705c121SKalle Valo 	/* page was stolen from us -- free our reference */
1180e705c121SKalle Valo 	if (page_stolen) {
1181e705c121SKalle Valo 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1182e705c121SKalle Valo 		rxb->page = NULL;
1183e705c121SKalle Valo 	}
1184e705c121SKalle Valo 
1185e705c121SKalle Valo 	/* Reuse the page if possible. For notification packets and
1186e705c121SKalle Valo 	 * SKBs that fail to Rx correctly, add them back into the
1187e705c121SKalle Valo 	 * rx_free list for reuse later. */
1188e705c121SKalle Valo 	if (rxb->page != NULL) {
1189e705c121SKalle Valo 		rxb->page_dma =
1190e705c121SKalle Valo 			dma_map_page(trans->dev, rxb->page, 0,
1191e705c121SKalle Valo 				     PAGE_SIZE << trans_pcie->rx_page_order,
1192e705c121SKalle Valo 				     DMA_FROM_DEVICE);
1193e705c121SKalle Valo 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1194e705c121SKalle Valo 			/*
1195e705c121SKalle Valo 			 * free the page(s) as well to not break
1196e705c121SKalle Valo 			 * the invariant that the items on the used
1197e705c121SKalle Valo 			 * list have no page(s)
1198e705c121SKalle Valo 			 */
1199e705c121SKalle Valo 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1200e705c121SKalle Valo 			rxb->page = NULL;
1201e705c121SKalle Valo 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1202e705c121SKalle Valo 		} else {
1203e705c121SKalle Valo 			list_add_tail(&rxb->list, &rxq->rx_free);
1204e705c121SKalle Valo 			rxq->free_count++;
1205e705c121SKalle Valo 		}
1206e705c121SKalle Valo 	} else
1207e705c121SKalle Valo 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1208e705c121SKalle Valo }
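
/*
 * Illustrative sketch (not driver code): packets are packed back to back in
 * the receive buffer page, each padded to FH_RSCSR_FRAME_ALIGN (0x40 in this
 * driver's headers; treated as an assumption here). Three packets of total
 * lengths 100, 60 and 500 bytes (status word included) advance the offset as:
 *
 *	u32 offset = 0;
 *	offset += ALIGN(100, 0x40);	// 0x080 (128)
 *	offset += ALIGN(60, 0x40);	// 0x0c0 (192)
 *	offset += ALIGN(500, 0x40);	// 0x2c0 (704)
 *
 * and the loop stops once offset + sizeof(u32) + sizeof(struct
 * iwl_cmd_header) no longer fits in max_len, or a frame is marked invalid.
 */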
1209e705c121SKalle Valo 
1210e705c121SKalle Valo /*
1211e705c121SKalle Valo  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1212e705c121SKalle Valo  */
12132e5d4a8fSHaim Dreyfuss static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1214e705c121SKalle Valo {
1215e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
12162e5d4a8fSHaim Dreyfuss 	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
1217d56daea4SSara Sharon 	u32 r, i, count = 0;
1218e705c121SKalle Valo 	bool emergency = false;
1219e705c121SKalle Valo 
1220e705c121SKalle Valo restart:
1221e705c121SKalle Valo 	spin_lock(&rxq->lock);
1222e705c121SKalle Valo 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1223e705c121SKalle Valo 	 * buffer that the driver may process (last buffer filled by ucode). */
1224e705c121SKalle Valo 	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1225e705c121SKalle Valo 	i = rxq->read;
1226e705c121SKalle Valo 
12275eae443eSSara Sharon 	/* W/A 9000 device step A0 wrap-around bug */
12285eae443eSSara Sharon 	r &= (rxq->queue_size - 1);
12295eae443eSSara Sharon 
1230e705c121SKalle Valo 	/* Rx interrupt, but nothing sent from uCode */
1231e705c121SKalle Valo 	if (i == r)
12325eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1233e705c121SKalle Valo 
1234e705c121SKalle Valo 	while (i != r) {
1235e705c121SKalle Valo 		struct iwl_rx_mem_buffer *rxb;
1236e705c121SKalle Valo 
123796a6497bSSara Sharon 		if (unlikely(rxq->used_count == rxq->queue_size / 2))
1238e705c121SKalle Valo 			emergency = true;
1239e705c121SKalle Valo 
124096a6497bSSara Sharon 		if (trans->cfg->mq_rx_supported) {
124196a6497bSSara Sharon 			/*
124296a6497bSSara Sharon 			 * used_bd entries are 32 bits wide, but only the low
124396a6497bSSara Sharon 			 * 12 bits are used to retrieve the vid
124496a6497bSSara Sharon 			 */
12455eae443eSSara Sharon 			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
124696a6497bSSara Sharon 
1247e25d65f2SSara Sharon 			if (WARN(!vid ||
1248e25d65f2SSara Sharon 				 vid > ARRAY_SIZE(trans_pcie->global_table),
1249e25d65f2SSara Sharon 				 "Invalid rxb index from HW %u\n", (u32)vid)) {
1250e25d65f2SSara Sharon 				iwl_force_nmi(trans);
12515eae443eSSara Sharon 				goto out;
1252e25d65f2SSara Sharon 			}
1253e25d65f2SSara Sharon 			rxb = trans_pcie->global_table[vid - 1];
1254b1753c62SSara Sharon 			if (WARN(rxb->invalid,
1255b1753c62SSara Sharon 				 "Invalid rxb from HW %u\n", (u32)vid)) {
1256b1753c62SSara Sharon 				iwl_force_nmi(trans);
1257b1753c62SSara Sharon 				goto out;
1258b1753c62SSara Sharon 			}
1259b1753c62SSara Sharon 			rxb->invalid = true;
126096a6497bSSara Sharon 		} else {
1261e705c121SKalle Valo 			rxb = rxq->queue[i];
1262e705c121SKalle Valo 			rxq->queue[i] = NULL;
126396a6497bSSara Sharon 		}
1264e705c121SKalle Valo 
12655eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
126678485054SSara Sharon 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1267e705c121SKalle Valo 
126896a6497bSSara Sharon 		i = (i + 1) & (rxq->queue_size - 1);
1269e705c121SKalle Valo 
1270d56daea4SSara Sharon 		/*
1271d56daea4SSara Sharon 		 * If RX_CLAIM_REQ_ALLOC rx buffers have been released, try to
1272d56daea4SSara Sharon 		 * claim the pre-allocated buffers from the allocator. If they
1273d56daea4SSara Sharon 		 * are not ready yet, we will try again next time; there is no
1274d56daea4SSara Sharon 		 * need to reschedule work, since the allocator exits only on
1275d56daea4SSara Sharon 		 * success.
1276e705c121SKalle Valo 		 */
1277d56daea4SSara Sharon 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1278d56daea4SSara Sharon 			iwl_pcie_rx_allocator_get(trans, rxq);
1279e705c121SKalle Valo 
1280d56daea4SSara Sharon 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1281d56daea4SSara Sharon 			struct iwl_rb_allocator *rba = &trans_pcie->rba;
1282d56daea4SSara Sharon 
1283d56daea4SSara Sharon 			/* Add the remaining empty RBDs for allocator use */
1284d56daea4SSara Sharon 			spin_lock(&rba->lock);
1285d56daea4SSara Sharon 			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1286d56daea4SSara Sharon 			spin_unlock(&rba->lock);
1287d56daea4SSara Sharon 		} else if (emergency) {
1288e705c121SKalle Valo 			count++;
1289e705c121SKalle Valo 			if (count == 8) {
1290e705c121SKalle Valo 				count = 0;
129196a6497bSSara Sharon 				if (rxq->used_count < rxq->queue_size / 3)
1292e705c121SKalle Valo 					emergency = false;
1293e0e168dcSGregory Greenman 
1294e705c121SKalle Valo 				rxq->read = i;
1295e705c121SKalle Valo 				spin_unlock(&rxq->lock);
1296e0e168dcSGregory Greenman 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
129778485054SSara Sharon 				iwl_pcie_rxq_restock(trans, rxq);
1298e705c121SKalle Valo 				goto restart;
1299e705c121SKalle Valo 			}
1300e705c121SKalle Valo 		}
1301e0e168dcSGregory Greenman 	}
13025eae443eSSara Sharon out:
1303e705c121SKalle Valo 	/* Backtrack one entry */
1304e705c121SKalle Valo 	rxq->read = i;
1305e705c121SKalle Valo 	spin_unlock(&rxq->lock);
1306e705c121SKalle Valo 
1307e705c121SKalle Valo 	/*
1308e705c121SKalle Valo 	 * Handle the case where, in emergency mode, some RBDs remain
1309e705c121SKalle Valo 	 * unallocated. Those RBDs are on the used list, but are not tracked by
1310e705c121SKalle Valo 	 * the queue's used_count, which counts allocator-owned RBDs only.
1311e705c121SKalle Valo 	 * Unallocated emergency RBDs must be allocated on exit; otherwise,
1312e705c121SKalle Valo 	 * when this function is called again, it may no longer be in emergency
1313e705c121SKalle Valo 	 * mode, and the RBDs would be handed to the allocator with no tracking
1314e705c121SKalle Valo 	 * in the RBD allocator counters, which would lead to them never being
1315e705c121SKalle Valo 	 * claimed back by the queue.
1316e705c121SKalle Valo 	 * By allocating them here, they are now on the queue's free list, and
1317e705c121SKalle Valo 	 * will be restocked by the next call of iwl_pcie_rxq_restock.
1318e705c121SKalle Valo 	 */
1319e705c121SKalle Valo 	if (unlikely(emergency && count))
132078485054SSara Sharon 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1321e705c121SKalle Valo 
1322bce97731SSara Sharon 	if (rxq->napi.poll)
1323bce97731SSara Sharon 		napi_gro_flush(&rxq->napi, false);
1324e0e168dcSGregory Greenman 
1325e0e168dcSGregory Greenman 	iwl_pcie_rxq_restock(trans, rxq);
1326e705c121SKalle Valo }
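
/*
 * Illustrative sketch (not driver code): queue_size is a power of two, so
 * masking with (queue_size - 1) implements the ring wrap without a modulo.
 * Assuming queue_size == 256:
 *
 *	u32 i = 255;
 *	i = (i + 1) & (256 - 1);	// i == 0: wrapped around
 *
 * The same mask is applied to the hardware closed_rb_num read above.
 */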
1327e705c121SKalle Valo 
13282e5d4a8fSHaim Dreyfuss static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
13292e5d4a8fSHaim Dreyfuss {
13302e5d4a8fSHaim Dreyfuss 	u8 queue = entry->entry;
13312e5d4a8fSHaim Dreyfuss 	struct msix_entry *entries = entry - queue;
13322e5d4a8fSHaim Dreyfuss 
13332e5d4a8fSHaim Dreyfuss 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
13342e5d4a8fSHaim Dreyfuss }
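
/*
 * Minimal sketch of the pointer arithmetic above (illustrative only):
 * entry points at msix_entries[entry->entry], so subtracting the index
 * steps back to msix_entries[0], and container_of() then recovers the
 * enclosing transport:
 *
 *	struct msix_entry *e0 = entry - entry->entry;	// &msix_entries[0]
 *	// container_of(e0, struct iwl_trans_pcie, msix_entries[0])
 *	//	== (struct iwl_trans_pcie *)((char *)e0 -
 *	//	    offsetof(struct iwl_trans_pcie, msix_entries[0]))
 */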
13352e5d4a8fSHaim Dreyfuss 
13362e5d4a8fSHaim Dreyfuss static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
13372e5d4a8fSHaim Dreyfuss 				      struct msix_entry *entry)
13382e5d4a8fSHaim Dreyfuss {
13392e5d4a8fSHaim Dreyfuss 	/*
13402e5d4a8fSHaim Dreyfuss 	 * Before sending the interrupt the HW disables it to prevent
13412e5d4a8fSHaim Dreyfuss 	 * a nested interrupt. This is done by writing 1 to the corresponding
13422e5d4a8fSHaim Dreyfuss 	 * bit in the mask register. After handling the interrupt, it should be
13432e5d4a8fSHaim Dreyfuss 	 * re-enabled by clearing this bit. This register is defined as a
13442e5d4a8fSHaim Dreyfuss 	 * write-1-to-clear (W1C) register, meaning that a bit is cleared
13452e5d4a8fSHaim Dreyfuss 	 * by writing 1 to it.
13462e5d4a8fSHaim Dreyfuss 	 */
13477ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
13482e5d4a8fSHaim Dreyfuss }
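
/*
 * Illustrative W1C example (not driver code): if the automask register holds
 * 0x6 (entries 1 and 2 masked), writing BIT(1) clears only bit 1:
 *
 *	u32 reg = 0x6;		// bits 1 and 2 set (masked)
 *	reg &= ~BIT(1);		// hardware effect of the W1C write
 *	// reg == 0x4: entry 1 re-enabled, entry 2 still masked
 */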
13492e5d4a8fSHaim Dreyfuss 
13502e5d4a8fSHaim Dreyfuss /*
13512e5d4a8fSHaim Dreyfuss  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
13522e5d4a8fSHaim Dreyfuss  * This interrupt handler should be used with RSS queues only.
13532e5d4a8fSHaim Dreyfuss  */
13542e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
13552e5d4a8fSHaim Dreyfuss {
13562e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
13572e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
13582e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
13592e5d4a8fSHaim Dreyfuss 
13605eae443eSSara Sharon 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
13615eae443eSSara Sharon 		return IRQ_NONE;
13625eae443eSSara Sharon 
13632e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
13642e5d4a8fSHaim Dreyfuss 
13652e5d4a8fSHaim Dreyfuss 	local_bh_disable();
13662e5d4a8fSHaim Dreyfuss 	iwl_pcie_rx_handle(trans, entry->entry);
13672e5d4a8fSHaim Dreyfuss 	local_bh_enable();
13682e5d4a8fSHaim Dreyfuss 
13692e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
13702e5d4a8fSHaim Dreyfuss 
13712e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
13722e5d4a8fSHaim Dreyfuss 
13732e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
13742e5d4a8fSHaim Dreyfuss }
13752e5d4a8fSHaim Dreyfuss 
1376e705c121SKalle Valo /*
1377e705c121SKalle Valo  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1378e705c121SKalle Valo  */
1379e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1380e705c121SKalle Valo {
1381e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1382e705c121SKalle Valo 	int i;
1383e705c121SKalle Valo 
1384e705c121SKalle Valo 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1385e705c121SKalle Valo 	if (trans->cfg->internal_wimax_coex &&
1386e705c121SKalle Valo 	    !trans->cfg->apmg_not_supported &&
1387e705c121SKalle Valo 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1388e705c121SKalle Valo 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1389e705c121SKalle Valo 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1390e705c121SKalle Valo 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1391e705c121SKalle Valo 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1392e705c121SKalle Valo 		iwl_op_mode_wimax_active(trans->op_mode);
1393e705c121SKalle Valo 		wake_up(&trans_pcie->wait_command_queue);
1394e705c121SKalle Valo 		return;
1395e705c121SKalle Valo 	}
1396e705c121SKalle Valo 
1397e705c121SKalle Valo 	iwl_pcie_dump_csr(trans);
1398e705c121SKalle Valo 	iwl_dump_fh(trans, NULL);
1399e705c121SKalle Valo 
1400e705c121SKalle Valo 	local_bh_disable();
1401e705c121SKalle Valo 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
1402e705c121SKalle Valo 	 * before we wake up the command caller, to ensure a proper cleanup. */
1403e705c121SKalle Valo 	iwl_trans_fw_error(trans);
1404e705c121SKalle Valo 	local_bh_enable();
1405e705c121SKalle Valo 
1406e705c121SKalle Valo 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
1407e705c121SKalle Valo 		del_timer(&trans_pcie->txq[i].stuck_timer);
1408e705c121SKalle Valo 
1409e705c121SKalle Valo 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1410e705c121SKalle Valo 	wake_up(&trans_pcie->wait_command_queue);
1411e705c121SKalle Valo }
1412e705c121SKalle Valo 
1413e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1414e705c121SKalle Valo {
1415e705c121SKalle Valo 	u32 inta;
1416e705c121SKalle Valo 
1417e705c121SKalle Valo 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1418e705c121SKalle Valo 
1419e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1420e705c121SKalle Valo 
1421e705c121SKalle Valo 	/* Discover which interrupts are active/pending */
1422e705c121SKalle Valo 	inta = iwl_read32(trans, CSR_INT);
1423e705c121SKalle Valo 
1424e705c121SKalle Valo 	/* the thread will service interrupts and re-enable them */
1425e705c121SKalle Valo 	return inta;
1426e705c121SKalle Valo }
1427e705c121SKalle Valo 
1428e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1429e705c121SKalle Valo #define ICT_SHIFT	12
1430e705c121SKalle Valo #define ICT_SIZE	(1 << ICT_SHIFT)
1431e705c121SKalle Valo #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
1432e705c121SKalle Valo 
1433e705c121SKalle Valo /* Interrupt handler using the ICT table. With this mechanism the driver stops
1434e705c121SKalle Valo  * using the INTA register to get the device's interrupts, since reading that
1435e705c121SKalle Valo  * register is expensive. Instead, the device writes interrupt causes into the
1436e705c121SKalle Valo  * ICT DRAM table, increments its index, and fires an interrupt. The driver
1437e705c121SKalle Valo  * ORs all ICT table entries from the current index up to the first entry
1438e705c121SKalle Valo  * with a 0 value; the result is the interrupt to service. The driver then
1439e705c121SKalle Valo  * sets the entries back to 0 and updates the index.
1440e705c121SKalle Valo  */
1441e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1442e705c121SKalle Valo {
1443e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1444e705c121SKalle Valo 	u32 inta;
1445e705c121SKalle Valo 	u32 val = 0;
1446e705c121SKalle Valo 	u32 read;
1447e705c121SKalle Valo 
1448e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1449e705c121SKalle Valo 
1450e705c121SKalle Valo 	/* Ignore interrupt if there's nothing in NIC to service.
1451e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1452e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC. */
1453e705c121SKalle Valo 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1454e705c121SKalle Valo 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1455e705c121SKalle Valo 	if (!read)
1456e705c121SKalle Valo 		return 0;
1457e705c121SKalle Valo 
1458e705c121SKalle Valo 	/*
1459e705c121SKalle Valo 	 * Collect all entries up to the first 0, starting from ict_index;
1460e705c121SKalle Valo 	 * note we already read at ict_index.
1461e705c121SKalle Valo 	 */
1462e705c121SKalle Valo 	do {
1463e705c121SKalle Valo 		val |= read;
1464e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1465e705c121SKalle Valo 				trans_pcie->ict_index, read);
1466e705c121SKalle Valo 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1467e705c121SKalle Valo 		trans_pcie->ict_index =
1468e705c121SKalle Valo 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1469e705c121SKalle Valo 
1470e705c121SKalle Valo 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1471e705c121SKalle Valo 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1472e705c121SKalle Valo 					   read);
1473e705c121SKalle Valo 	} while (read);
1474e705c121SKalle Valo 
1475e705c121SKalle Valo 	/* We should not get this value, just ignore it. */
1476e705c121SKalle Valo 	if (val == 0xffffffff)
1477e705c121SKalle Valo 		val = 0;
1478e705c121SKalle Valo 
1479e705c121SKalle Valo 	/*
1480e705c121SKalle Valo 	 * This is a w/a for a h/w bug. The bug may cause the Rx bit
1481e705c121SKalle Valo 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1482e705c121SKalle Valo 	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
1483e705c121SKalle Valo 	 * so we use them to decide on the real state of the Rx bit.
1484e705c121SKalle Valo 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1485e705c121SKalle Valo 	 */
1486e705c121SKalle Valo 	if (val & 0xC0000)
1487e705c121SKalle Valo 		val |= 0x8000;
1488e705c121SKalle Valo 
1489e705c121SKalle Valo 	inta = (0xff & val) | ((0xff00 & val) << 16);
1490e705c121SKalle Valo 	return inta;
1491e705c121SKalle Valo }
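
/*
 * Illustrative sketch of the final reassembly above (not driver code): an
 * ICT entry carries CSR_INT bits 0-7 in its low byte and bits 24-31 in its
 * second byte, so for val == 0x8042:
 *
 *	inta = (0xff & 0x8042) | ((0xff00 & 0x8042) << 16);
 *	//   = 0x42 | 0x80000000 == 0x80000042
 *
 * which restores the Rx bit (bit 15 shifted to 31) plus the two low bits.
 */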
1492e705c121SKalle Valo 
1493e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1494e705c121SKalle Valo {
1495e705c121SKalle Valo 	struct iwl_trans *trans = dev_id;
1496e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1497e705c121SKalle Valo 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1498e705c121SKalle Valo 	u32 inta = 0;
1499e705c121SKalle Valo 	u32 handled = 0;
1500e705c121SKalle Valo 
1501e705c121SKalle Valo 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1502e705c121SKalle Valo 
1503e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1504e705c121SKalle Valo 
1505e705c121SKalle Valo 	/* if the dram interrupt table is not set yet,
1506e705c121SKalle Valo 	 * use the legacy (non-ICT) interrupt cause read.
1507e705c121SKalle Valo 	 */
1508e705c121SKalle Valo 	if (likely(trans_pcie->use_ict))
1509e705c121SKalle Valo 		inta = iwl_pcie_int_cause_ict(trans);
1510e705c121SKalle Valo 	else
1511e705c121SKalle Valo 		inta = iwl_pcie_int_cause_non_ict(trans);
1512e705c121SKalle Valo 
1513e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1514e705c121SKalle Valo 		IWL_DEBUG_ISR(trans,
1515e705c121SKalle Valo 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1516e705c121SKalle Valo 			      inta, trans_pcie->inta_mask,
1517e705c121SKalle Valo 			      iwl_read32(trans, CSR_INT_MASK),
1518e705c121SKalle Valo 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1519e705c121SKalle Valo 		if (inta & (~trans_pcie->inta_mask))
1520e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1521e705c121SKalle Valo 				      "We got a masked interrupt (0x%08x)\n",
1522e705c121SKalle Valo 				      inta & (~trans_pcie->inta_mask));
1523e705c121SKalle Valo 	}
1524e705c121SKalle Valo 
1525e705c121SKalle Valo 	inta &= trans_pcie->inta_mask;
1526e705c121SKalle Valo 
1527e705c121SKalle Valo 	/*
1528e705c121SKalle Valo 	 * Ignore interrupt if there's nothing in NIC to service.
1529e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1530e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC.
1531e705c121SKalle Valo 	 */
1532e705c121SKalle Valo 	if (unlikely(!inta)) {
1533e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1534e705c121SKalle Valo 		/*
1535e705c121SKalle Valo 		 * Re-enable interrupts here since we don't
1536e705c121SKalle Valo 		 * have anything to service
1537e705c121SKalle Valo 		 */
1538e705c121SKalle Valo 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1539f16c3ebfSEmmanuel Grumbach 			_iwl_enable_interrupts(trans);
1540e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1541e705c121SKalle Valo 		lock_map_release(&trans->sync_cmd_lockdep_map);
1542e705c121SKalle Valo 		return IRQ_NONE;
1543e705c121SKalle Valo 	}
1544e705c121SKalle Valo 
1545e705c121SKalle Valo 	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1546e705c121SKalle Valo 		/*
1547e705c121SKalle Valo 		 * Hardware disappeared. It might have
1548e705c121SKalle Valo 		 * already raised an interrupt.
1549e705c121SKalle Valo 		 */
1550e705c121SKalle Valo 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1551e705c121SKalle Valo 		spin_unlock(&trans_pcie->irq_lock);
1552e705c121SKalle Valo 		goto out;
1553e705c121SKalle Valo 	}
1554e705c121SKalle Valo 
1555e705c121SKalle Valo 	/* Ack/clear/reset pending uCode interrupts.
1556e705c121SKalle Valo 	 * Note:  Some bits in CSR_INT are "OR"s of bits in CSR_FH_INT_STATUS.
1557e705c121SKalle Valo 	 */
1558e705c121SKalle Valo 	/* There is a hardware bug in the interrupt mask function that some
1559e705c121SKalle Valo 	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1560e705c121SKalle Valo 	 * they are disabled in the CSR_INT_MASK register. Furthermore the
1561e705c121SKalle Valo 	 * ICT interrupt handling mechanism has another bug that might cause
1562e705c121SKalle Valo 	 * these unmasked interrupts to fail to be detected. We work around the
1563e705c121SKalle Valo 	 * hardware bugs here by ACKing all the possible interrupts so that
1564e705c121SKalle Valo 	 * interrupt coalescing can still be achieved.
1565e705c121SKalle Valo 	 */
1566e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1567e705c121SKalle Valo 
1568e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR))
1569e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1570e705c121SKalle Valo 			      inta, iwl_read32(trans, CSR_INT_MASK));
1571e705c121SKalle Valo 
1572e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1573e705c121SKalle Valo 
1574e705c121SKalle Valo 	/* Now service all interrupt bits discovered above. */
1575e705c121SKalle Valo 	if (inta & CSR_INT_BIT_HW_ERR) {
1576e705c121SKalle Valo 		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1577e705c121SKalle Valo 
1578e705c121SKalle Valo 		/* Tell the device to stop sending interrupts */
1579e705c121SKalle Valo 		iwl_disable_interrupts(trans);
1580e705c121SKalle Valo 
1581e705c121SKalle Valo 		isr_stats->hw++;
1582e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1583e705c121SKalle Valo 
1584e705c121SKalle Valo 		handled |= CSR_INT_BIT_HW_ERR;
1585e705c121SKalle Valo 
1586e705c121SKalle Valo 		goto out;
1587e705c121SKalle Valo 	}
1588e705c121SKalle Valo 
1589e705c121SKalle Valo 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1590e705c121SKalle Valo 		/* NIC fires this, but we don't use it, redundant with WAKEUP */
1591e705c121SKalle Valo 		if (inta & CSR_INT_BIT_SCD) {
1592e705c121SKalle Valo 			IWL_DEBUG_ISR(trans,
1593e705c121SKalle Valo 				      "Scheduler finished transmitting the frame(s).\n");
1594e705c121SKalle Valo 			isr_stats->sch++;
1595e705c121SKalle Valo 		}
1596e705c121SKalle Valo 
1597e705c121SKalle Valo 		/* Alive notification via Rx interrupt will do the real work */
1598e705c121SKalle Valo 		if (inta & CSR_INT_BIT_ALIVE) {
1599e705c121SKalle Valo 			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1600e705c121SKalle Valo 			isr_stats->alive++;
1601e705c121SKalle Valo 		}
1602e705c121SKalle Valo 	}
1603e705c121SKalle Valo 
1604e705c121SKalle Valo 	/* Safely ignore these bits for debug checks below */
1605e705c121SKalle Valo 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1606e705c121SKalle Valo 
1607e705c121SKalle Valo 	/* HW RF KILL switch toggled */
1608e705c121SKalle Valo 	if (inta & CSR_INT_BIT_RF_KILL) {
1609e705c121SKalle Valo 		bool hw_rfkill;
1610e705c121SKalle Valo 
1611e705c121SKalle Valo 		hw_rfkill = iwl_is_rfkill_set(trans);
1612e705c121SKalle Valo 		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1613e705c121SKalle Valo 			 hw_rfkill ? "disable radio" : "enable radio");
1614e705c121SKalle Valo 
1615e705c121SKalle Valo 		isr_stats->rfkill++;
1616e705c121SKalle Valo 
1617e705c121SKalle Valo 		mutex_lock(&trans_pcie->mutex);
1618e705c121SKalle Valo 		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1619e705c121SKalle Valo 		mutex_unlock(&trans_pcie->mutex);
1620e705c121SKalle Valo 		if (hw_rfkill) {
1621e705c121SKalle Valo 			set_bit(STATUS_RFKILL, &trans->status);
1622e705c121SKalle Valo 			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1623e705c121SKalle Valo 					       &trans->status))
1624e705c121SKalle Valo 				IWL_DEBUG_RF_KILL(trans,
1625e705c121SKalle Valo 						  "Rfkill while SYNC HCMD in flight\n");
1626e705c121SKalle Valo 			wake_up(&trans_pcie->wait_command_queue);
1627e705c121SKalle Valo 		} else {
1628e705c121SKalle Valo 			clear_bit(STATUS_RFKILL, &trans->status);
1629e705c121SKalle Valo 		}
1630e705c121SKalle Valo 
1631e705c121SKalle Valo 		handled |= CSR_INT_BIT_RF_KILL;
1632e705c121SKalle Valo 	}
1633e705c121SKalle Valo 
1634e705c121SKalle Valo 	/* Chip got too hot and stopped itself */
1635e705c121SKalle Valo 	if (inta & CSR_INT_BIT_CT_KILL) {
1636e705c121SKalle Valo 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1637e705c121SKalle Valo 		isr_stats->ctkill++;
1638e705c121SKalle Valo 		handled |= CSR_INT_BIT_CT_KILL;
1639e705c121SKalle Valo 	}
1640e705c121SKalle Valo 
1641e705c121SKalle Valo 	/* Error detected by uCode */
1642e705c121SKalle Valo 	if (inta & CSR_INT_BIT_SW_ERR) {
1643e705c121SKalle Valo 		IWL_ERR(trans, "Microcode SW error detected. "
1644e705c121SKalle Valo 			"Restarting 0x%X.\n", inta);
1645e705c121SKalle Valo 		isr_stats->sw++;
1646e705c121SKalle Valo 		iwl_pcie_irq_handle_error(trans);
1647e705c121SKalle Valo 		handled |= CSR_INT_BIT_SW_ERR;
1648e705c121SKalle Valo 	}
1649e705c121SKalle Valo 
1650e705c121SKalle Valo 	/* uCode wakes up after power-down sleep */
1651e705c121SKalle Valo 	if (inta & CSR_INT_BIT_WAKEUP) {
1652e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1653e705c121SKalle Valo 		iwl_pcie_rxq_check_wrptr(trans);
1654e705c121SKalle Valo 		iwl_pcie_txq_check_wrptrs(trans);
1655e705c121SKalle Valo 
1656e705c121SKalle Valo 		isr_stats->wakeup++;
1657e705c121SKalle Valo 
1658e705c121SKalle Valo 		handled |= CSR_INT_BIT_WAKEUP;
1659e705c121SKalle Valo 	}
1660e705c121SKalle Valo 
1661e705c121SKalle Valo 	/* All uCode command responses, including Tx command responses,
1662e705c121SKalle Valo 	 * Rx "responses" (frame-received notification), and other
1663e705c121SKalle Valo 	 * notifications from uCode come through here. */
1664e705c121SKalle Valo 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1665e705c121SKalle Valo 		    CSR_INT_BIT_RX_PERIODIC)) {
1666e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1667e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1668e705c121SKalle Valo 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1669e705c121SKalle Valo 			iwl_write32(trans, CSR_FH_INT_STATUS,
1670e705c121SKalle Valo 					CSR_FH_INT_RX_MASK);
1671e705c121SKalle Valo 		}
1672e705c121SKalle Valo 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1673e705c121SKalle Valo 			handled |= CSR_INT_BIT_RX_PERIODIC;
1674e705c121SKalle Valo 			iwl_write32(trans,
1675e705c121SKalle Valo 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1676e705c121SKalle Valo 		}
1677e705c121SKalle Valo 		/* Sending an RX interrupt requires many steps to be done in
1678e705c121SKalle Valo 		 * the device:
1679e705c121SKalle Valo 		 * 1- write interrupt to current index in ICT table.
1680e705c121SKalle Valo 		 * 2- dma RX frame.
1681e705c121SKalle Valo 		 * 3- update RX shared data to indicate last write index.
1682e705c121SKalle Valo 		 * 4- send interrupt.
1683e705c121SKalle Valo 		 * This could lead to an RX race: the driver could receive the
1684e705c121SKalle Valo 		 * RX interrupt before the shared data reflects the changes;
1685e705c121SKalle Valo 		 * the periodic interrupt will detect any dangling Rx activity.
1686e705c121SKalle Valo 		 */
1687e705c121SKalle Valo 
1688e705c121SKalle Valo 		/* Disable periodic interrupt; we use it as just a one-shot. */
1689e705c121SKalle Valo 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
1690e705c121SKalle Valo 			    CSR_INT_PERIODIC_DIS);
1691e705c121SKalle Valo 
1692e705c121SKalle Valo 		/*
1693e705c121SKalle Valo 		 * Enable periodic interrupt in 8 msec only if we received
1694e705c121SKalle Valo 		 * real RX interrupt (instead of just periodic int), to catch
1695e705c121SKalle Valo 		 * any dangling Rx interrupt.  If it was just the periodic
1696e705c121SKalle Valo 		 * interrupt, there was no dangling Rx activity, and no need
1697e705c121SKalle Valo 		 * to extend the periodic interrupt; one-shot is enough.
1698e705c121SKalle Valo 		 */
1699e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1700e705c121SKalle Valo 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
1701e705c121SKalle Valo 				   CSR_INT_PERIODIC_ENA);
1702e705c121SKalle Valo 
1703e705c121SKalle Valo 		isr_stats->rx++;
1704e705c121SKalle Valo 
1705e705c121SKalle Valo 		local_bh_disable();
17062e5d4a8fSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 0);
1707e705c121SKalle Valo 		local_bh_enable();
1708e705c121SKalle Valo 	}
1709e705c121SKalle Valo 
1710e705c121SKalle Valo 	/* This "Tx" DMA channel is used only for loading uCode */
1711e705c121SKalle Valo 	if (inta & CSR_INT_BIT_FH_TX) {
1712e705c121SKalle Valo 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1713e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1714e705c121SKalle Valo 		isr_stats->tx++;
1715e705c121SKalle Valo 		handled |= CSR_INT_BIT_FH_TX;
1716e705c121SKalle Valo 		/* Wake up uCode load routine, now that load is complete */
1717e705c121SKalle Valo 		trans_pcie->ucode_write_complete = true;
1718e705c121SKalle Valo 		wake_up(&trans_pcie->ucode_write_waitq);
1719e705c121SKalle Valo 	}
1720e705c121SKalle Valo 
1721e705c121SKalle Valo 	if (inta & ~handled) {
1722e705c121SKalle Valo 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1723e705c121SKalle Valo 		isr_stats->unhandled++;
1724e705c121SKalle Valo 	}
1725e705c121SKalle Valo 
1726e705c121SKalle Valo 	if (inta & ~(trans_pcie->inta_mask)) {
1727e705c121SKalle Valo 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1728e705c121SKalle Valo 			 inta & ~trans_pcie->inta_mask);
1729e705c121SKalle Valo 	}
1730e705c121SKalle Valo 
1731f16c3ebfSEmmanuel Grumbach 	spin_lock(&trans_pcie->irq_lock);
1732a6bd005fSEmmanuel Grumbach 	/* only re-enable all interrupts if they were disabled by irq */
1733f16c3ebfSEmmanuel Grumbach 	if (test_bit(STATUS_INT_ENABLED, &trans->status))
1734f16c3ebfSEmmanuel Grumbach 		_iwl_enable_interrupts(trans);
1735f16c3ebfSEmmanuel Grumbach 	/* we are loading the firmware, enable FH_TX interrupt only */
1736f16c3ebfSEmmanuel Grumbach 	else if (handled & CSR_INT_BIT_FH_TX)
1737f16c3ebfSEmmanuel Grumbach 		iwl_enable_fw_load_int(trans);
1738e705c121SKalle Valo 	/* Re-enable RF_KILL if it occurred */
1739e705c121SKalle Valo 	else if (handled & CSR_INT_BIT_RF_KILL)
1740e705c121SKalle Valo 		iwl_enable_rfkill_int(trans);
1741f16c3ebfSEmmanuel Grumbach 	spin_unlock(&trans_pcie->irq_lock);
1742e705c121SKalle Valo 
1743e705c121SKalle Valo out:
1744e705c121SKalle Valo 	lock_map_release(&trans->sync_cmd_lockdep_map);
1745e705c121SKalle Valo 	return IRQ_HANDLED;
1746e705c121SKalle Valo }
1747e705c121SKalle Valo 
1748e705c121SKalle Valo /******************************************************************************
1749e705c121SKalle Valo  *
1750e705c121SKalle Valo  * ICT functions
1751e705c121SKalle Valo  *
1752e705c121SKalle Valo  ******************************************************************************/
1753e705c121SKalle Valo 
1754e705c121SKalle Valo /* Free dram table */
1755e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans)
1756e705c121SKalle Valo {
1757e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1758e705c121SKalle Valo 
1759e705c121SKalle Valo 	if (trans_pcie->ict_tbl) {
1760e705c121SKalle Valo 		dma_free_coherent(trans->dev, ICT_SIZE,
1761e705c121SKalle Valo 				  trans_pcie->ict_tbl,
1762e705c121SKalle Valo 				  trans_pcie->ict_tbl_dma);
1763e705c121SKalle Valo 		trans_pcie->ict_tbl = NULL;
1764e705c121SKalle Valo 		trans_pcie->ict_tbl_dma = 0;
1765e705c121SKalle Valo 	}
1766e705c121SKalle Valo }
1767e705c121SKalle Valo 
1768e705c121SKalle Valo /*
1769e705c121SKalle Valo  * Allocate the DRAM shared table; it is an aligned memory
1770e705c121SKalle Valo  * block of ICT_SIZE.
1771e705c121SKalle Valo  * Also reset all data related to the ICT table interrupt.
1772e705c121SKalle Valo  */
1773e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1774e705c121SKalle Valo {
1775e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1776e705c121SKalle Valo 
1777e705c121SKalle Valo 	trans_pcie->ict_tbl =
1778e705c121SKalle Valo 		dma_zalloc_coherent(trans->dev, ICT_SIZE,
1779e705c121SKalle Valo 				   &trans_pcie->ict_tbl_dma,
1780e705c121SKalle Valo 				   GFP_KERNEL);
1781e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1782e705c121SKalle Valo 		return -ENOMEM;
1783e705c121SKalle Valo 
1784e705c121SKalle Valo 	/* just an API sanity check ... it is guaranteed to be aligned */
1785e705c121SKalle Valo 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1786e705c121SKalle Valo 		iwl_pcie_free_ict(trans);
1787e705c121SKalle Valo 		return -EINVAL;
1788e705c121SKalle Valo 	}
1789e705c121SKalle Valo 
1790e705c121SKalle Valo 	return 0;
1791e705c121SKalle Valo }
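
/*
 * Illustrative sketch of the alignment check above (not driver code):
 * ICT_SIZE is 4096 (1 << ICT_SHIFT), so (dma & (ICT_SIZE - 1)) tests the low
 * 12 bits of the DMA address:
 *
 *	dma_addr_t ok  = 0x12345000;	// 4 KiB aligned: low 12 bits zero
 *	dma_addr_t bad = 0x12345010;	// 0x010 & 0xfff != 0 -> WARN, -EINVAL
 *
 * dma_zalloc_coherent() returns at least page-aligned memory here, so the
 * check is a sanity check only.
 */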
1792e705c121SKalle Valo 
1793e705c121SKalle Valo /* The device is going up; inform it that it will use the ICT interrupt
1794e705c121SKalle Valo  * table, and tell the driver to start using the ICT interrupt.
1795e705c121SKalle Valo  */
1796e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans)
1797e705c121SKalle Valo {
1798e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1799e705c121SKalle Valo 	u32 val;
1800e705c121SKalle Valo 
1801e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1802e705c121SKalle Valo 		return;
1803e705c121SKalle Valo 
1804e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1805f16c3ebfSEmmanuel Grumbach 	_iwl_disable_interrupts(trans);
1806e705c121SKalle Valo 
1807e705c121SKalle Valo 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1808e705c121SKalle Valo 
1809e705c121SKalle Valo 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
1810e705c121SKalle Valo 
1811e705c121SKalle Valo 	val |= CSR_DRAM_INT_TBL_ENABLE |
1812e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
1813e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
1814e705c121SKalle Valo 
1815e705c121SKalle Valo 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
1816e705c121SKalle Valo 
1817e705c121SKalle Valo 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1818e705c121SKalle Valo 	trans_pcie->use_ict = true;
1819e705c121SKalle Valo 	trans_pcie->ict_index = 0;
1820e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1821f16c3ebfSEmmanuel Grumbach 	_iwl_enable_interrupts(trans);
1822e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1823e705c121SKalle Valo }
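
/*
 * Illustrative sketch of the CSR_DRAM_INT_TBL_REG encoding above (not driver
 * code): since the table is 4 KiB aligned, its low ICT_SHIFT bits are zero
 * and the address fits in the register as a page number next to the control
 * flags. Assuming ict_tbl_dma == 0x12345000:
 *
 *	u32 val = 0x12345000 >> ICT_SHIFT;	// 0x12345, the page number
 *	val |= CSR_DRAM_INT_TBL_ENABLE |
 *	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
 *	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
 */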
1824e705c121SKalle Valo 
1825e705c121SKalle Valo /* The device is going down; disable ICT interrupt usage */
1826e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans)
1827e705c121SKalle Valo {
1828e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1829e705c121SKalle Valo 
1830e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1831e705c121SKalle Valo 	trans_pcie->use_ict = false;
1832e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1833e705c121SKalle Valo }
1834e705c121SKalle Valo 
1835e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data)
1836e705c121SKalle Valo {
1837e705c121SKalle Valo 	struct iwl_trans *trans = data;
1838e705c121SKalle Valo 
1839e705c121SKalle Valo 	if (!trans)
1840e705c121SKalle Valo 		return IRQ_NONE;
1841e705c121SKalle Valo 
1842e705c121SKalle Valo 	/* Disable (but don't clear!) interrupts here to avoid
1843e705c121SKalle Valo 	 * back-to-back ISRs and sporadic interrupts from our NIC.
1844e705c121SKalle Valo 	 * If we have something to service, the tasklet will re-enable ints.
1845e705c121SKalle Valo 	 * If we *don't* have something, we'll re-enable before leaving here.
1846e705c121SKalle Valo 	 */
1847e705c121SKalle Valo 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1848e705c121SKalle Valo 
1849e705c121SKalle Valo 	return IRQ_WAKE_THREAD;
1850e705c121SKalle Valo }
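
/*
 * Illustrative note (not driver code): this is the hard-irq half of a
 * threaded interrupt. A typical registration pairing the two halves would
 * look like (flags and names are assumptions, not taken from this file):
 *
 *	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 *				   iwl_pcie_irq_handler, IRQF_SHARED,
 *				   DRV_NAME, trans);
 *
 * iwl_pcie_isr() masks CSR_INT_MASK and returns IRQ_WAKE_THREAD, so
 * iwl_pcie_irq_handler() then runs in a kernel thread where it may sleep.
 */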
18512e5d4a8fSHaim Dreyfuss 
18522e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
18532e5d4a8fSHaim Dreyfuss {
18542e5d4a8fSHaim Dreyfuss 	return IRQ_WAKE_THREAD;
18552e5d4a8fSHaim Dreyfuss }
18562e5d4a8fSHaim Dreyfuss 
18572e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
18582e5d4a8fSHaim Dreyfuss {
18592e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
18602e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
18612e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
186246167a8fSColin Ian King 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
18632e5d4a8fSHaim Dreyfuss 	u32 inta_fh, inta_hw;
18642e5d4a8fSHaim Dreyfuss 
18652e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
18662e5d4a8fSHaim Dreyfuss 
18672e5d4a8fSHaim Dreyfuss 	spin_lock(&trans_pcie->irq_lock);
18687ef3dd26SHaim Dreyfuss 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
18697ef3dd26SHaim Dreyfuss 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
18702e5d4a8fSHaim Dreyfuss 	/*
18712e5d4a8fSHaim Dreyfuss 	 * Clear the causes registers to avoid handling the same cause twice.
18722e5d4a8fSHaim Dreyfuss 	 */
18737ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
18747ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
18752e5d4a8fSHaim Dreyfuss 	spin_unlock(&trans_pcie->irq_lock);
18762e5d4a8fSHaim Dreyfuss 
18772e5d4a8fSHaim Dreyfuss 	if (unlikely(!(inta_fh | inta_hw))) {
18782e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
18792e5d4a8fSHaim Dreyfuss 		lock_map_release(&trans->sync_cmd_lockdep_map);
18802e5d4a8fSHaim Dreyfuss 		return IRQ_NONE;
18812e5d4a8fSHaim Dreyfuss 	}
18822e5d4a8fSHaim Dreyfuss 
18832e5d4a8fSHaim Dreyfuss 	if (iwl_have_debug_level(IWL_DL_ISR))
18842e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
18852e5d4a8fSHaim Dreyfuss 			      inta_fh,
18862e5d4a8fSHaim Dreyfuss 			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
18872e5d4a8fSHaim Dreyfuss 
18882e5d4a8fSHaim Dreyfuss 	/* This "Tx" DMA channel is used only for loading uCode */
18892e5d4a8fSHaim Dreyfuss 	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
18902e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
18912e5d4a8fSHaim Dreyfuss 		isr_stats->tx++;
18922e5d4a8fSHaim Dreyfuss 		/*
18932e5d4a8fSHaim Dreyfuss 		 * Wake up uCode load routine,
18942e5d4a8fSHaim Dreyfuss 		 * now that load is complete
18952e5d4a8fSHaim Dreyfuss 		 */
18962e5d4a8fSHaim Dreyfuss 		trans_pcie->ucode_write_complete = true;
18972e5d4a8fSHaim Dreyfuss 		wake_up(&trans_pcie->ucode_write_waitq);
18982e5d4a8fSHaim Dreyfuss 	}
18992e5d4a8fSHaim Dreyfuss 
19002e5d4a8fSHaim Dreyfuss 	/* Error detected by uCode */
19012e5d4a8fSHaim Dreyfuss 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
19022e5d4a8fSHaim Dreyfuss 	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
19032e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans,
19042e5d4a8fSHaim Dreyfuss 			"Microcode SW error detected. Restarting 0x%X.\n",
19052e5d4a8fSHaim Dreyfuss 			inta_fh);
19062e5d4a8fSHaim Dreyfuss 		isr_stats->sw++;
19072e5d4a8fSHaim Dreyfuss 		iwl_pcie_irq_handle_error(trans);
19082e5d4a8fSHaim Dreyfuss 	}
19092e5d4a8fSHaim Dreyfuss 
19102e5d4a8fSHaim Dreyfuss 	/* After checking the FH register, check the HW register */
19112e5d4a8fSHaim Dreyfuss 	if (iwl_have_debug_level(IWL_DL_ISR))
19122e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans,
19132e5d4a8fSHaim Dreyfuss 			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
19142e5d4a8fSHaim Dreyfuss 			      inta_hw,
19152e5d4a8fSHaim Dreyfuss 			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
19162e5d4a8fSHaim Dreyfuss 
19172e5d4a8fSHaim Dreyfuss 	/* Alive notification via Rx interrupt will do the real work */
19182e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
19192e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
19202e5d4a8fSHaim Dreyfuss 		isr_stats->alive++;
19212e5d4a8fSHaim Dreyfuss 	}
19222e5d4a8fSHaim Dreyfuss 
19232e5d4a8fSHaim Dreyfuss 	/* uCode wakes up after power-down sleep */
19242e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
19252e5d4a8fSHaim Dreyfuss 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
19262e5d4a8fSHaim Dreyfuss 		iwl_pcie_rxq_check_wrptr(trans);
19272e5d4a8fSHaim Dreyfuss 		iwl_pcie_txq_check_wrptrs(trans);
19282e5d4a8fSHaim Dreyfuss 
19292e5d4a8fSHaim Dreyfuss 		isr_stats->wakeup++;
19302e5d4a8fSHaim Dreyfuss 	}
19312e5d4a8fSHaim Dreyfuss 
19322e5d4a8fSHaim Dreyfuss 	/* Chip got too hot and stopped itself */
19332e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
19342e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
19352e5d4a8fSHaim Dreyfuss 		isr_stats->ctkill++;
19362e5d4a8fSHaim Dreyfuss 	}
19372e5d4a8fSHaim Dreyfuss 
19382e5d4a8fSHaim Dreyfuss 	/* HW RF KILL switch toggled */
19392e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
19402e5d4a8fSHaim Dreyfuss 		bool hw_rfkill;
19412e5d4a8fSHaim Dreyfuss 
19422e5d4a8fSHaim Dreyfuss 		hw_rfkill = iwl_is_rfkill_set(trans);
19432e5d4a8fSHaim Dreyfuss 		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
19442e5d4a8fSHaim Dreyfuss 			 hw_rfkill ? "disable radio" : "enable radio");
19452e5d4a8fSHaim Dreyfuss 
19462e5d4a8fSHaim Dreyfuss 		isr_stats->rfkill++;
19472e5d4a8fSHaim Dreyfuss 
19482e5d4a8fSHaim Dreyfuss 		mutex_lock(&trans_pcie->mutex);
19492e5d4a8fSHaim Dreyfuss 		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
19502e5d4a8fSHaim Dreyfuss 		mutex_unlock(&trans_pcie->mutex);
19512e5d4a8fSHaim Dreyfuss 		if (hw_rfkill) {
19522e5d4a8fSHaim Dreyfuss 			set_bit(STATUS_RFKILL, &trans->status);
19532e5d4a8fSHaim Dreyfuss 			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
19542e5d4a8fSHaim Dreyfuss 					       &trans->status))
19552e5d4a8fSHaim Dreyfuss 				IWL_DEBUG_RF_KILL(trans,
19562e5d4a8fSHaim Dreyfuss 						  "Rfkill while SYNC HCMD in flight\n");
19572e5d4a8fSHaim Dreyfuss 			wake_up(&trans_pcie->wait_command_queue);
19582e5d4a8fSHaim Dreyfuss 		} else {
19592e5d4a8fSHaim Dreyfuss 			clear_bit(STATUS_RFKILL, &trans->status);
19602e5d4a8fSHaim Dreyfuss 		}
19612e5d4a8fSHaim Dreyfuss 	}
19622e5d4a8fSHaim Dreyfuss 
19632e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
19642e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans,
19652e5d4a8fSHaim Dreyfuss 			"Hardware error detected. Restarting.\n");
19662e5d4a8fSHaim Dreyfuss 
19672e5d4a8fSHaim Dreyfuss 		isr_stats->hw++;
19682e5d4a8fSHaim Dreyfuss 		iwl_pcie_irq_handle_error(trans);
19692e5d4a8fSHaim Dreyfuss 	}
19702e5d4a8fSHaim Dreyfuss 
19712e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
19722e5d4a8fSHaim Dreyfuss 
19732e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
19742e5d4a8fSHaim Dreyfuss 
19752e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
19762e5d4a8fSHaim Dreyfuss }
1977