/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator keeps an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of RBDs missing per allocation request (a request is posted
 *   with 2 empty RBDs; there is no guarantee when the other 6 RBDs are
 *   supplied). The queues supply the recycling of the remaining RBDs (see the
 *   worked example below).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
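 *
 * Worked example (added commentary, not part of the original comment):
 * with 2 RX queues the allocator reserve is 2 * (8 - 2) = 12 RBDs. A
 * queue posts an allocation request holding only 2 empty RBDs, yet the
 * allocator must eventually hand back a full batch of 8 pages; the
 * 6-RBD-per-queue reserve bridges the gap until the queue returns the
 * remaining 6 used RBDs.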
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
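
/*
 * Worked example (added commentary): with queue_size = 256, read = 5 and
 * write = 200, the expression above yields (5 - 200 - 1) & 255 = 60 free
 * slots. The "- 1" keeps one slot permanently unused so that read == write
 * always means "empty" and can never be confused with "full".
 */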

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
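
/*
 * Worked example (added commentary): a DMA address of 0x12345600 becomes
 * the RBD pointer cpu_to_le32(0x00123456). Dropping the low 8 bits is
 * only safe because receive buffers are at least 256-byte aligned (they
 * are whole pages from alloc_pages()).
 */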

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* TODO: remove this for 22560 once fw does it */
		iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
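
/*
 * Worked example (added commentary): the write pointer is always handed
 * to the device in multiples of 8, so with rxq->write = 13 the value
 * written is round_down(13, 8) = 8; the 5 not-yet-announced RBDs are
 * reported only after 3 more have been restocked.
 */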

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
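
/*
 * Worked example (added commentary): the 64-bit multi-queue RBD packs the
 * page DMA address (4 KiB aligned, so its low 12 bits are zero) with the
 * buffer ID in those low bits, e.g. 0x123456000 | 0x2b = 0x12345602b.
 * The device later echoes the vid back through the used-BD ring, which is
 * how the driver maps a completion to its iwl_rx_mem_buffer.
 */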

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}
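
/*
 * Note (added commentary): assuming 4 KiB pages, rx_page_order is 0 for
 * 4 KiB receive buffers and 1 for 8 KiB ones, so __GFP_COMP is requested
 * exactly when one receive buffer spans more than one physical page.
 */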

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}
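
/*
 * Note (added commentary): rx_pool[] holds every RBD the driver owns --
 * the per-queue rings plus the allocator reserve -- so the sweep above
 * frees every mapped page regardless of which list or ring its RBD is
 * currently on. Note also that iwl_pcie_rxq_alloc_rbs() drops rxq->lock
 * around the page allocation and DMA mapping, so the slow path never
 * runs under the spinlock.
 */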

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
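
/*
 * Note (added commentary): the queues and the allocator synchronize
 * through two atomic counters. A queue bumps req_pending and schedules
 * this work item; every completed batch of RX_CLAIM_REQ_ALLOC pages bumps
 * req_ready, which iwl_pcie_rx_allocator_get() below consumes. rba->lock
 * is only taken to splice lists; the page allocations themselves run on
 * the local list without any lock held.
 */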

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by a queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
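
/*
 * Worked example (added commentary): atomic_dec_if_positive(&v) with
 * v = 2 stores 1 and returns 1; with v = 0 it leaves the counter alone
 * and returns -1. That is what makes the early-return check above a
 * race-free "claim one ready batch, if any" without a lock around the
 * counter itself.
 */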

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (rxq->bd)
		dma_free_coherent(dev, free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	spin_lock_init(&rxq->lock);
	if (trans->cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_zalloc_coherent(dev,
				      free_size * rxq->queue_size,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->cfg->mq_rx_supported) {
		rxq->used_bd = dma_zalloc_coherent(dev,
						   sizeof(__le32) *
						   rxq->queue_size,
						   &rxq->used_bd_dma,
						   GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma,
					   GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->tr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->cr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}
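
/*
 * Note (added commentary): on any allocation failure the err label above
 * tears down the DMA memory of *all* queues and frees trans_pcie->rxq
 * itself, so callers can propagate the error without their own unwind
 * pass.
 */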

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			return ret;
	}
	return 0;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
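
/*
 * Note (added commentary): the base-address writes above deliberately
 * drop low address bits -- the RBD ring base is programmed as addr >> 8
 * (256-byte aligned) and the status write-back area as addr >> 4
 * (16-byte aligned). Both come from dma_zalloc_coherent(), which returns
 * at least page-aligned memory, so no address bits are lost.
 */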

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
		return;

	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
		return;

	if (!trans->cfg->integrated)
		return;

	/*
	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
	 * values.
	 * This costs some power, but is needed as a W/A for a 9000
	 * integrated A-step bug where shadow registers are not in the
	 * retention list and their value is lost when the NIC powers down.
	 */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 2, 4, 8 or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe.
	 * Default queue is 0.
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	iwl_pcie_enable_rx_wake(trans, true);
}
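
/*
 * Worked example (added commentary): "enabled" accumulates BIT(i) and
 * BIT(i + 16) per queue -- judging by the "disable free and used rx queue
 * operation" write above, the two halves of RFH_RXF_RXQ_ACTIVE appear to
 * gate the free and used BD rings respectively -- so a 2-queue setup
 * writes 0x00030003.
 */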

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
			MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}
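
/*
 * Note (added commentary): the init order above is software state first
 * (_iwl_pcie_rx_init), then the DMA engine (iwl_pcie_rx_mq_hw_init or
 * iwl_pcie_rx_hw_init), then buffers (iwl_pcie_rxq_restock), and finally
 * the write-pointer doorbell.
 */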

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers,
	 * but we still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion
		 */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
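
/*
 * Worked example (added commentary): with a request posted at 2 used RBDs
 * and claimed in batches of 8 (RX_POST_REQ_ALLOC and RX_CLAIM_REQ_ALLOC),
 * the modulo test above fires at used_count = 2, 10, 18, ... -- i.e. a
 * new allocation request is posted as soon as the queue has returned 2
 * RBDs beyond the last batch boundary.
 */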
1130e705c121SKalle Valo Allocator has another 6 from pool for the request completion*/ 1131e705c121SKalle Valo spin_lock(&rba->lock); 1132e705c121SKalle Valo list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1133e705c121SKalle Valo spin_unlock(&rba->lock); 1134e705c121SKalle Valo 1135e705c121SKalle Valo atomic_inc(&rba->req_pending); 1136e705c121SKalle Valo queue_work(rba->alloc_wq, &rba->rx_alloc); 1137e705c121SKalle Valo } 1138e705c121SKalle Valo } 1139e705c121SKalle Valo 1140e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 114178485054SSara Sharon struct iwl_rxq *rxq, 1142e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb, 1143e705c121SKalle Valo bool emergency) 1144e705c121SKalle Valo { 1145e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1146b2a3b1c1SSara Sharon struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1147e705c121SKalle Valo bool page_stolen = false; 1148e705c121SKalle Valo int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 1149e705c121SKalle Valo u32 offset = 0; 1150e705c121SKalle Valo 1151e705c121SKalle Valo if (WARN_ON(!rxb)) 1152e705c121SKalle Valo return; 1153e705c121SKalle Valo 1154e705c121SKalle Valo dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); 1155e705c121SKalle Valo 1156e705c121SKalle Valo while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { 1157e705c121SKalle Valo struct iwl_rx_packet *pkt; 1158e705c121SKalle Valo u16 sequence; 1159e705c121SKalle Valo bool reclaim; 1160e705c121SKalle Valo int index, cmd_index, len; 1161e705c121SKalle Valo struct iwl_rx_cmd_buffer rxcb = { 1162e705c121SKalle Valo ._offset = offset, 1163e705c121SKalle Valo ._rx_page_order = trans_pcie->rx_page_order, 1164e705c121SKalle Valo ._page = rxb->page, 1165e705c121SKalle Valo ._page_stolen = false, 1166e705c121SKalle Valo .truesize = max_len, 1167e705c121SKalle Valo }; 1168e705c121SKalle Valo 1169e705c121SKalle Valo pkt = rxb_addr(&rxcb); 1170e705c121SKalle Valo 11713bfdee76SJohannes Berg if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { 11723bfdee76SJohannes Berg IWL_DEBUG_RX(trans, 11733bfdee76SJohannes Berg "Q %d: RB end marker at offset %d\n", 11743bfdee76SJohannes Berg rxq->id, offset); 1175e705c121SKalle Valo break; 11763bfdee76SJohannes Berg } 1177e705c121SKalle Valo 1178a395058eSJohannes Berg WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1179a395058eSJohannes Berg FH_RSCSR_RXQ_POS != rxq->id, 1180a395058eSJohannes Berg "frame on invalid queue - is on %d and indicates %d\n", 1181a395058eSJohannes Berg rxq->id, 1182a395058eSJohannes Berg (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1183a395058eSJohannes Berg FH_RSCSR_RXQ_POS); 1184ab2e696bSSara Sharon 1185e705c121SKalle Valo IWL_DEBUG_RX(trans, 11863bfdee76SJohannes Berg "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", 11873bfdee76SJohannes Berg rxq->id, offset, 118839bdb17eSSharon Dvir iwl_get_cmd_string(trans, 118939bdb17eSSharon Dvir iwl_cmd_id(pkt->hdr.cmd, 119039bdb17eSSharon Dvir pkt->hdr.group_id, 119139bdb17eSSharon Dvir 0)), 119235177c99SSara Sharon pkt->hdr.group_id, pkt->hdr.cmd, 119335177c99SSara Sharon le16_to_cpu(pkt->hdr.sequence)); 1194e705c121SKalle Valo 1195e705c121SKalle Valo len = iwl_rx_packet_len(pkt); 1196e705c121SKalle Valo len += sizeof(u32); /* account for status word */ 1197e705c121SKalle Valo trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 1198e705c121SKalle Valo trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 
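		/*
		 * Note: a single RB may hold several packets back-to-back;
		 * the end of this loop advances offset by len aligned up to
		 * FH_RSCSR_FRAME_ALIGN. Assuming a 64-byte alignment value,
		 * a packet of 92 bytes (including the status word) would
		 * advance offset by 128 bytes to the next packet.
		 */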
1199e705c121SKalle Valo 1200e705c121SKalle Valo /* Reclaim a command buffer only if this packet is a response 1201e705c121SKalle Valo * to a (driver-originated) command. 1202e705c121SKalle Valo * If the packet (e.g. Rx frame) originated from uCode, 1203e705c121SKalle Valo * there is no command buffer to reclaim. 1204e705c121SKalle Valo * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1205e705c121SKalle Valo * but apparently a few don't get set; catch them here. */ 1206e705c121SKalle Valo reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 1207d8a130b0SJohannes Berg if (reclaim && !pkt->hdr.group_id) { 1208e705c121SKalle Valo int i; 1209e705c121SKalle Valo 1210e705c121SKalle Valo for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 1211e705c121SKalle Valo if (trans_pcie->no_reclaim_cmds[i] == 1212e705c121SKalle Valo pkt->hdr.cmd) { 1213e705c121SKalle Valo reclaim = false; 1214e705c121SKalle Valo break; 1215e705c121SKalle Valo } 1216e705c121SKalle Valo } 1217e705c121SKalle Valo } 1218e705c121SKalle Valo 1219e705c121SKalle Valo sequence = le16_to_cpu(pkt->hdr.sequence); 1220e705c121SKalle Valo index = SEQ_TO_INDEX(sequence); 12214ecab561SEmmanuel Grumbach cmd_index = iwl_pcie_get_cmd_index(txq, index); 1222e705c121SKalle Valo 1223bce97731SSara Sharon if (rxq->id == 0) 1224bce97731SSara Sharon iwl_op_mode_rx(trans->op_mode, &rxq->napi, 1225bce97731SSara Sharon &rxcb); 1226bce97731SSara Sharon else 1227bce97731SSara Sharon iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 1228bce97731SSara Sharon &rxcb, rxq->id); 1229e705c121SKalle Valo 1230e705c121SKalle Valo if (reclaim) { 1231e705c121SKalle Valo kzfree(txq->entries[cmd_index].free_buf); 1232e705c121SKalle Valo txq->entries[cmd_index].free_buf = NULL; 1233e705c121SKalle Valo } 1234e705c121SKalle Valo 1235e705c121SKalle Valo /* 1236e705c121SKalle Valo * After here, we should always check rxcb._page_stolen, 1237e705c121SKalle Valo * if it is true then one of the handlers took the page. 1238e705c121SKalle Valo */ 1239e705c121SKalle Valo 1240e705c121SKalle Valo if (reclaim) { 1241e705c121SKalle Valo /* Invoke any callbacks, transfer the buffer to caller, 1242e705c121SKalle Valo * and fire off the (possibly) blocking 1243e705c121SKalle Valo * iwl_trans_send_cmd() 1244e705c121SKalle Valo * as we reclaim the driver command queue */ 1245e705c121SKalle Valo if (!rxcb._page_stolen) 1246e705c121SKalle Valo iwl_pcie_hcmd_complete(trans, &rxcb); 1247e705c121SKalle Valo else 1248e705c121SKalle Valo IWL_WARN(trans, "Claim null rxb?\n"); 1249e705c121SKalle Valo } 1250e705c121SKalle Valo 1251e705c121SKalle Valo page_stolen |= rxcb._page_stolen; 1252e705c121SKalle Valo offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1253e705c121SKalle Valo } 1254e705c121SKalle Valo 1255e705c121SKalle Valo /* page was stolen from us -- free our reference */ 1256e705c121SKalle Valo if (page_stolen) { 1257e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 1258e705c121SKalle Valo rxb->page = NULL; 1259e705c121SKalle Valo } 1260e705c121SKalle Valo 1261e705c121SKalle Valo /* Reuse the page if possible. For notification packets and 1262e705c121SKalle Valo * SKBs that fail to Rx correctly, add them back into the 1263e705c121SKalle Valo * rx_free list for reuse later. 
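	 * (An RB is PAGE_SIZE << trans_pcie->rx_page_order bytes; with 4 KiB
	 * pages and, say, rx_page_order = 2, each buffer is 16 KiB and must
	 * be remapped for DMA below before it can be handed back to the HW.)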
*/ 1264e705c121SKalle Valo if (rxb->page != NULL) { 1265e705c121SKalle Valo rxb->page_dma = 1266e705c121SKalle Valo dma_map_page(trans->dev, rxb->page, 0, 1267e705c121SKalle Valo PAGE_SIZE << trans_pcie->rx_page_order, 1268e705c121SKalle Valo DMA_FROM_DEVICE); 1269e705c121SKalle Valo if (dma_mapping_error(trans->dev, rxb->page_dma)) { 1270e705c121SKalle Valo /* 1271e705c121SKalle Valo * free the page(s) as well to not break 1272e705c121SKalle Valo * the invariant that the items on the used 1273e705c121SKalle Valo * list have no page(s) 1274e705c121SKalle Valo */ 1275e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 1276e705c121SKalle Valo rxb->page = NULL; 1277e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1278e705c121SKalle Valo } else { 1279e705c121SKalle Valo list_add_tail(&rxb->list, &rxq->rx_free); 1280e705c121SKalle Valo rxq->free_count++; 1281e705c121SKalle Valo } 1282e705c121SKalle Valo } else 1283e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1284e705c121SKalle Valo } 1285e705c121SKalle Valo 1286e705c121SKalle Valo /* 1287e705c121SKalle Valo * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1288e705c121SKalle Valo */ 12892e5d4a8fSHaim Dreyfuss static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) 1290e705c121SKalle Valo { 1291e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 12922e5d4a8fSHaim Dreyfuss struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; 1293d56daea4SSara Sharon u32 r, i, count = 0; 1294e705c121SKalle Valo bool emergency = false; 1295e705c121SKalle Valo 1296e705c121SKalle Valo restart: 1297e705c121SKalle Valo spin_lock(&rxq->lock); 1298e705c121SKalle Valo /* uCode's read index (stored in shared DRAM) indicates the last Rx 1299e705c121SKalle Valo * buffer that the driver may process (last buffer filled by ucode). 
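	 * For example, on a 512-entry queue a raw closed_rb_num of 0x205
	 * masked with 0x0FFF and then with (queue_size - 1) below yields
	 * r = 5, i.e. the index has wrapped around the circular buffer.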
 */
13006aa7de05SMark Rutland 	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1301e705c121SKalle Valo 	i = rxq->read;
1302e705c121SKalle Valo 
13035eae443eSSara Sharon 	/* W/A 9000 device step A0 wrap-around bug */
13045eae443eSSara Sharon 	r &= (rxq->queue_size - 1);
13055eae443eSSara Sharon 
1306e705c121SKalle Valo 	/* Rx interrupt, but nothing sent from uCode */
1307e705c121SKalle Valo 	if (i == r)
13085eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1309e705c121SKalle Valo 
1310e705c121SKalle Valo 	while (i != r) {
1311e705c121SKalle Valo 		struct iwl_rx_mem_buffer *rxb;
1312e705c121SKalle Valo 
131396a6497bSSara Sharon 		if (unlikely(rxq->used_count == rxq->queue_size / 2))
1314e705c121SKalle Valo 			emergency = true;
1315e705c121SKalle Valo 
131696a6497bSSara Sharon 		if (trans->cfg->mq_rx_supported) {
131796a6497bSSara Sharon 			/*
131896a6497bSSara Sharon 			 * used_bd entries are 32 bits wide, but only the low
131996a6497bSSara Sharon 			 * 12 bits are used to retrieve the vid
132096a6497bSSara Sharon 			 */
13215eae443eSSara Sharon 			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
132296a6497bSSara Sharon 
1323e25d65f2SSara Sharon 			if (WARN(!vid ||
1324e25d65f2SSara Sharon 				 vid > ARRAY_SIZE(trans_pcie->global_table),
1325e25d65f2SSara Sharon 				 "Invalid rxb index from HW %u\n", (u32)vid)) {
1326e25d65f2SSara Sharon 				iwl_force_nmi(trans);
13275eae443eSSara Sharon 				goto out;
1328e25d65f2SSara Sharon 			}
1329e25d65f2SSara Sharon 			rxb = trans_pcie->global_table[vid - 1];
1330b1753c62SSara Sharon 			if (WARN(rxb->invalid,
1331b1753c62SSara Sharon 				 "Invalid rxb from HW %u\n", (u32)vid)) {
1332b1753c62SSara Sharon 				iwl_force_nmi(trans);
1333b1753c62SSara Sharon 				goto out;
1334b1753c62SSara Sharon 			}
1335b1753c62SSara Sharon 			rxb->invalid = true;
133696a6497bSSara Sharon 		} else {
1337e705c121SKalle Valo 			rxb = rxq->queue[i];
1338e705c121SKalle Valo 			rxq->queue[i] = NULL;
133996a6497bSSara Sharon 		}
1340e705c121SKalle Valo 
13415eae443eSSara Sharon 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
134278485054SSara Sharon 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1343e705c121SKalle Valo 
134496a6497bSSara Sharon 		i = (i + 1) & (rxq->queue_size - 1);
1345e705c121SKalle Valo 
1346d56daea4SSara Sharon 		/*
1347d56daea4SSara Sharon 		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers,
1348d56daea4SSara Sharon 		 * try to claim the pre-allocated buffers from the allocator.
1349d56daea4SSara Sharon 		 * If it is not ready, we will try to claim them next time.
1350d56daea4SSara Sharon * There is no need to reschedule work - allocator exits only 1351d56daea4SSara Sharon * on success 1352e705c121SKalle Valo */ 1353d56daea4SSara Sharon if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) 1354d56daea4SSara Sharon iwl_pcie_rx_allocator_get(trans, rxq); 1355e705c121SKalle Valo 1356d56daea4SSara Sharon if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { 1357d56daea4SSara Sharon struct iwl_rb_allocator *rba = &trans_pcie->rba; 1358d56daea4SSara Sharon 1359d56daea4SSara Sharon /* Add the remaining empty RBDs for allocator use */ 1360d56daea4SSara Sharon spin_lock(&rba->lock); 1361d56daea4SSara Sharon list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1362d56daea4SSara Sharon spin_unlock(&rba->lock); 1363d56daea4SSara Sharon } else if (emergency) { 1364e705c121SKalle Valo count++; 1365e705c121SKalle Valo if (count == 8) { 1366e705c121SKalle Valo count = 0; 136796a6497bSSara Sharon if (rxq->used_count < rxq->queue_size / 3) 1368e705c121SKalle Valo emergency = false; 1369e0e168dcSGregory Greenman 1370e705c121SKalle Valo rxq->read = i; 1371e705c121SKalle Valo spin_unlock(&rxq->lock); 1372e0e168dcSGregory Greenman iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 137378485054SSara Sharon iwl_pcie_rxq_restock(trans, rxq); 1374e705c121SKalle Valo goto restart; 1375e705c121SKalle Valo } 1376e705c121SKalle Valo } 1377e0e168dcSGregory Greenman } 13785eae443eSSara Sharon out: 1379e705c121SKalle Valo /* Backtrack one entry */ 1380e705c121SKalle Valo rxq->read = i; 1381e705c121SKalle Valo spin_unlock(&rxq->lock); 1382e705c121SKalle Valo 1383e705c121SKalle Valo /* 1384e705c121SKalle Valo * handle a case where in emergency there are some unallocated RBDs. 1385e705c121SKalle Valo * those RBDs are in the used list, but are not tracked by the queue's 1386e705c121SKalle Valo * used_count which counts allocator owned RBDs. 1387e705c121SKalle Valo * unallocated emergency RBDs must be allocated on exit, otherwise 1388e705c121SKalle Valo * when called again the function may not be in emergency mode and 1389e705c121SKalle Valo * they will be handed to the allocator with no tracking in the RBD 1390e705c121SKalle Valo * allocator counters, which will lead to them never being claimed back 1391e705c121SKalle Valo * by the queue. 1392e705c121SKalle Valo * by allocating them here, they are now in the queue free list, and 1393e705c121SKalle Valo * will be restocked by the next call of iwl_pcie_rxq_restock. 
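	 * As a worked example on a 512-entry queue: emergency mode is entered
	 * when used_count reaches queue_size / 2 = 256 and, re-checked every
	 * 8 processed RBDs, is left again once used_count drops below
	 * queue_size / 3 = 170 (integer division).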
1394e705c121SKalle Valo 	 */
1395e705c121SKalle Valo 	if (unlikely(emergency && count))
139678485054SSara Sharon 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1397e705c121SKalle Valo 
1398bce97731SSara Sharon 	if (rxq->napi.poll)
1399bce97731SSara Sharon 		napi_gro_flush(&rxq->napi, false);
1400e0e168dcSGregory Greenman 
1401e0e168dcSGregory Greenman 	iwl_pcie_rxq_restock(trans, rxq);
1402e705c121SKalle Valo }
1403e705c121SKalle Valo 
14042e5d4a8fSHaim Dreyfuss static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
14052e5d4a8fSHaim Dreyfuss {
14062e5d4a8fSHaim Dreyfuss 	u8 queue = entry->entry;
14072e5d4a8fSHaim Dreyfuss 	struct msix_entry *entries = entry - queue;
14082e5d4a8fSHaim Dreyfuss 
14092e5d4a8fSHaim Dreyfuss 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
14102e5d4a8fSHaim Dreyfuss }
14112e5d4a8fSHaim Dreyfuss 
14122e5d4a8fSHaim Dreyfuss static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
14132e5d4a8fSHaim Dreyfuss 				      struct msix_entry *entry)
14142e5d4a8fSHaim Dreyfuss {
14152e5d4a8fSHaim Dreyfuss 	/*
14162e5d4a8fSHaim Dreyfuss 	 * Before sending the interrupt the HW disables it to prevent
14172e5d4a8fSHaim Dreyfuss 	 * a nested interrupt. This is done by writing 1 to the corresponding
14182e5d4a8fSHaim Dreyfuss 	 * bit in the mask register. After handling the interrupt, it should be
14192e5d4a8fSHaim Dreyfuss 	 * re-enabled by clearing this bit. This register is defined as a
14202e5d4a8fSHaim Dreyfuss 	 * write-1-clear (W1C) register, meaning that it is cleared
14212e5d4a8fSHaim Dreyfuss 	 * by writing 1 to the bit.
14222e5d4a8fSHaim Dreyfuss 	 */
14237ef3dd26SHaim Dreyfuss 	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
14242e5d4a8fSHaim Dreyfuss }
14252e5d4a8fSHaim Dreyfuss 
14262e5d4a8fSHaim Dreyfuss /*
14272e5d4a8fSHaim Dreyfuss  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
14282e5d4a8fSHaim Dreyfuss  * This interrupt handler should be used with RSS queue only.
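 * The MSI-X entry number is used directly as the RX queue number: the
 * interrupt for entry 2, for instance, is serviced by polling rxq[2]
 * via iwl_pcie_rx_handle(trans, 2).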
14292e5d4a8fSHaim Dreyfuss */ 14302e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) 14312e5d4a8fSHaim Dreyfuss { 14322e5d4a8fSHaim Dreyfuss struct msix_entry *entry = dev_id; 14332e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 14342e5d4a8fSHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans; 14352e5d4a8fSHaim Dreyfuss 1436c42ff65dSJohannes Berg trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); 1437c42ff65dSJohannes Berg 14385eae443eSSara Sharon if (WARN_ON(entry->entry >= trans->num_rx_queues)) 14395eae443eSSara Sharon return IRQ_NONE; 14405eae443eSSara Sharon 14412e5d4a8fSHaim Dreyfuss lock_map_acquire(&trans->sync_cmd_lockdep_map); 14422e5d4a8fSHaim Dreyfuss 14432e5d4a8fSHaim Dreyfuss local_bh_disable(); 14442e5d4a8fSHaim Dreyfuss iwl_pcie_rx_handle(trans, entry->entry); 14452e5d4a8fSHaim Dreyfuss local_bh_enable(); 14462e5d4a8fSHaim Dreyfuss 14472e5d4a8fSHaim Dreyfuss iwl_pcie_clear_irq(trans, entry); 14482e5d4a8fSHaim Dreyfuss 14492e5d4a8fSHaim Dreyfuss lock_map_release(&trans->sync_cmd_lockdep_map); 14502e5d4a8fSHaim Dreyfuss 14512e5d4a8fSHaim Dreyfuss return IRQ_HANDLED; 14522e5d4a8fSHaim Dreyfuss } 14532e5d4a8fSHaim Dreyfuss 1454e705c121SKalle Valo /* 1455e705c121SKalle Valo * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1456e705c121SKalle Valo */ 1457e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1458e705c121SKalle Valo { 1459e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1460e705c121SKalle Valo int i; 1461e705c121SKalle Valo 1462e705c121SKalle Valo /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1463e705c121SKalle Valo if (trans->cfg->internal_wimax_coex && 1464e705c121SKalle Valo !trans->cfg->apmg_not_supported && 1465e705c121SKalle Valo (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1466e705c121SKalle Valo APMS_CLK_VAL_MRB_FUNC_MODE) || 1467e705c121SKalle Valo (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1468e705c121SKalle Valo APMG_PS_CTRL_VAL_RESET_REQ))) { 1469e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1470e705c121SKalle Valo iwl_op_mode_wimax_active(trans->op_mode); 1471e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1472e705c121SKalle Valo return; 1473e705c121SKalle Valo } 1474e705c121SKalle Valo 147513a3a390SSara Sharon for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 147613a3a390SSara Sharon if (!trans_pcie->txq[i]) 147713a3a390SSara Sharon continue; 1478b2a3b1c1SSara Sharon del_timer(&trans_pcie->txq[i]->stuck_timer); 147913a3a390SSara Sharon } 1480e705c121SKalle Valo 14817d75f32eSEmmanuel Grumbach /* The STATUS_FW_ERROR bit is set in this function. This must happen 14827d75f32eSEmmanuel Grumbach * before we wake up the command caller, to ensure a proper cleanup. 
 */
14837d75f32eSEmmanuel Grumbach 	iwl_trans_fw_error(trans);
14847d75f32eSEmmanuel Grumbach 
1485e705c121SKalle Valo 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1486e705c121SKalle Valo 	wake_up(&trans_pcie->wait_command_queue);
1487e705c121SKalle Valo }
1488e705c121SKalle Valo 
1489e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1490e705c121SKalle Valo {
1491e705c121SKalle Valo 	u32 inta;
1492e705c121SKalle Valo 
1493e705c121SKalle Valo 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1494e705c121SKalle Valo 
1495e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1496e705c121SKalle Valo 
1497e705c121SKalle Valo 	/* Discover which interrupts are active/pending */
1498e705c121SKalle Valo 	inta = iwl_read32(trans, CSR_INT);
1499e705c121SKalle Valo 
1500e705c121SKalle Valo 	/* the thread will service interrupts and re-enable them */
1501e705c121SKalle Valo 	return inta;
1502e705c121SKalle Valo }
1503e705c121SKalle Valo 
1504e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1505e705c121SKalle Valo #define ICT_SHIFT	12
1506e705c121SKalle Valo #define ICT_SIZE	(1 << ICT_SHIFT)
1507e705c121SKalle Valo #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
1508e705c121SKalle Valo 
1509e705c121SKalle Valo /* Interrupt handler using the ICT table. With this mechanism the driver
1510e705c121SKalle Valo  * stops reading the INTA register to discover the device's interrupts,
1511e705c121SKalle Valo  * since reading that register is expensive. Instead, the device writes
1512e705c121SKalle Valo  * interrupt causes into the ICT DRAM table, increments the index and
1513e705c121SKalle Valo  * fires an interrupt; the driver then ORs all ICT table entries from the
1514e705c121SKalle Valo  * current index up to the first entry holding 0. The result is the
1515e705c121SKalle Valo  * interrupt to service; the driver sets the entries back to 0 and updates the index.
1516e705c121SKalle Valo  */
1517e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1518e705c121SKalle Valo {
1519e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1520e705c121SKalle Valo 	u32 inta;
1521e705c121SKalle Valo 	u32 val = 0;
1522e705c121SKalle Valo 	u32 read;
1523e705c121SKalle Valo 
1524e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1525e705c121SKalle Valo 
1526e705c121SKalle Valo 	/* Ignore interrupt if there's nothing in NIC to service.
1527e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1528e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC. */
1529e705c121SKalle Valo 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1530e705c121SKalle Valo 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1531e705c121SKalle Valo 	if (!read)
1532e705c121SKalle Valo 		return 0;
1533e705c121SKalle Valo 
1534e705c121SKalle Valo 	/*
1535e705c121SKalle Valo 	 * Collect all entries up to the first 0, starting from ict_index;
1536e705c121SKalle Valo 	 * note we already read at ict_index.
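	 * For example, if the entries at ict_index hold 0x2, 0x8, 0, the loop
	 * below ORs them into val = 0xa, zeroes the two consumed entries and
	 * leaves ict_index on the zero entry; with ICT_SIZE = 4096 the table
	 * has ICT_COUNT = 1024 entries, so the index wraps via
	 * (ict_index + 1) & (ICT_COUNT - 1).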
1537e705c121SKalle Valo 	 */
1538e705c121SKalle Valo 	do {
1539e705c121SKalle Valo 		val |= read;
1540e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1541e705c121SKalle Valo 				trans_pcie->ict_index, read);
1542e705c121SKalle Valo 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1543e705c121SKalle Valo 		trans_pcie->ict_index =
1544e705c121SKalle Valo 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1545e705c121SKalle Valo 
1546e705c121SKalle Valo 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1547e705c121SKalle Valo 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1548e705c121SKalle Valo 					   read);
1549e705c121SKalle Valo 	} while (read);
1550e705c121SKalle Valo 
1551e705c121SKalle Valo 	/* We should not get this value, just ignore it. */
1552e705c121SKalle Valo 	if (val == 0xffffffff)
1553e705c121SKalle Valo 		val = 0;
1554e705c121SKalle Valo 
1555e705c121SKalle Valo 	/*
1556e705c121SKalle Valo 	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1557e705c121SKalle Valo 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1558e705c121SKalle Valo 	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1559e705c121SKalle Valo 	 * so we use them to decide on the real state of the Rx bit.
1560e705c121SKalle Valo 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1561e705c121SKalle Valo 	 */
1562e705c121SKalle Valo 	if (val & 0xC0000)
1563e705c121SKalle Valo 		val |= 0x8000;
1564e705c121SKalle Valo 
1565e705c121SKalle Valo 	inta = (0xff & val) | ((0xff00 & val) << 16);
1566e705c121SKalle Valo 	return inta;
1567e705c121SKalle Valo }
1568e705c121SKalle Valo 
1569fa4de7f7SJohannes Berg void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
15703a6e168bSJohannes Berg {
15713a6e168bSJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
15723a6e168bSJohannes Berg 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1573326477e4SJohannes Berg 	bool hw_rfkill, prev, report;
15743a6e168bSJohannes Berg 
15753a6e168bSJohannes Berg 	mutex_lock(&trans_pcie->mutex);
1576326477e4SJohannes Berg 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
15773a6e168bSJohannes Berg 	hw_rfkill = iwl_is_rfkill_set(trans);
1578326477e4SJohannes Berg 	if (hw_rfkill) {
1579326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1580326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_HW, &trans->status);
1581326477e4SJohannes Berg 	}
1582326477e4SJohannes Berg 	if (trans_pcie->opmode_down)
1583326477e4SJohannes Berg 		report = hw_rfkill;
1584326477e4SJohannes Berg 	else
1585326477e4SJohannes Berg 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
15863a6e168bSJohannes Berg 
15873a6e168bSJohannes Berg 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
15883a6e168bSJohannes Berg 		 hw_rfkill ?
"disable radio" : "enable radio"); 15893a6e168bSJohannes Berg 15903a6e168bSJohannes Berg isr_stats->rfkill++; 15913a6e168bSJohannes Berg 1592326477e4SJohannes Berg if (prev != report) 1593326477e4SJohannes Berg iwl_trans_pcie_rf_kill(trans, report); 15943a6e168bSJohannes Berg mutex_unlock(&trans_pcie->mutex); 15953a6e168bSJohannes Berg 15963a6e168bSJohannes Berg if (hw_rfkill) { 15973a6e168bSJohannes Berg if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 15983a6e168bSJohannes Berg &trans->status)) 15993a6e168bSJohannes Berg IWL_DEBUG_RF_KILL(trans, 16003a6e168bSJohannes Berg "Rfkill while SYNC HCMD in flight\n"); 16013a6e168bSJohannes Berg wake_up(&trans_pcie->wait_command_queue); 16023a6e168bSJohannes Berg } else { 1603326477e4SJohannes Berg clear_bit(STATUS_RFKILL_HW, &trans->status); 1604326477e4SJohannes Berg if (trans_pcie->opmode_down) 1605326477e4SJohannes Berg clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 16063a6e168bSJohannes Berg } 16073a6e168bSJohannes Berg } 16083a6e168bSJohannes Berg 1609e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 1610e705c121SKalle Valo { 1611e705c121SKalle Valo struct iwl_trans *trans = dev_id; 1612e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1613e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1614e705c121SKalle Valo u32 inta = 0; 1615e705c121SKalle Valo u32 handled = 0; 1616e705c121SKalle Valo 1617e705c121SKalle Valo lock_map_acquire(&trans->sync_cmd_lockdep_map); 1618e705c121SKalle Valo 1619e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 1620e705c121SKalle Valo 1621e705c121SKalle Valo /* dram interrupt table not set yet, 1622e705c121SKalle Valo * use legacy interrupt. 1623e705c121SKalle Valo */ 1624e705c121SKalle Valo if (likely(trans_pcie->use_ict)) 1625e705c121SKalle Valo inta = iwl_pcie_int_cause_ict(trans); 1626e705c121SKalle Valo else 1627e705c121SKalle Valo inta = iwl_pcie_int_cause_non_ict(trans); 1628e705c121SKalle Valo 1629e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) { 1630e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1631e705c121SKalle Valo "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", 1632e705c121SKalle Valo inta, trans_pcie->inta_mask, 1633e705c121SKalle Valo iwl_read32(trans, CSR_INT_MASK), 1634e705c121SKalle Valo iwl_read32(trans, CSR_FH_INT_STATUS)); 1635e705c121SKalle Valo if (inta & (~trans_pcie->inta_mask)) 1636e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1637e705c121SKalle Valo "We got a masked interrupt (0x%08x)\n", 1638e705c121SKalle Valo inta & (~trans_pcie->inta_mask)); 1639e705c121SKalle Valo } 1640e705c121SKalle Valo 1641e705c121SKalle Valo inta &= trans_pcie->inta_mask; 1642e705c121SKalle Valo 1643e705c121SKalle Valo /* 1644e705c121SKalle Valo * Ignore interrupt if there's nothing in NIC to service. 1645e705c121SKalle Valo * This may be due to IRQ shared with another device, 1646e705c121SKalle Valo * or due to sporadic interrupts thrown from our NIC. 
1647e705c121SKalle Valo */ 1648e705c121SKalle Valo if (unlikely(!inta)) { 1649e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1650e705c121SKalle Valo /* 1651e705c121SKalle Valo * Re-enable interrupts here since we don't 1652e705c121SKalle Valo * have anything to service 1653e705c121SKalle Valo */ 1654e705c121SKalle Valo if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1655f16c3ebfSEmmanuel Grumbach _iwl_enable_interrupts(trans); 1656e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1657e705c121SKalle Valo lock_map_release(&trans->sync_cmd_lockdep_map); 1658e705c121SKalle Valo return IRQ_NONE; 1659e705c121SKalle Valo } 1660e705c121SKalle Valo 1661e705c121SKalle Valo if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1662e705c121SKalle Valo /* 1663e705c121SKalle Valo * Hardware disappeared. It might have 1664e705c121SKalle Valo * already raised an interrupt. 1665e705c121SKalle Valo */ 1666e705c121SKalle Valo IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 1667e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1668e705c121SKalle Valo goto out; 1669e705c121SKalle Valo } 1670e705c121SKalle Valo 1671e705c121SKalle Valo /* Ack/clear/reset pending uCode interrupts. 1672e705c121SKalle Valo * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1673e705c121SKalle Valo */ 1674e705c121SKalle Valo /* There is a hardware bug in the interrupt mask function that some 1675e705c121SKalle Valo * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if 1676e705c121SKalle Valo * they are disabled in the CSR_INT_MASK register. Furthermore the 1677e705c121SKalle Valo * ICT interrupt handling mechanism has another bug that might cause 1678e705c121SKalle Valo * these unmasked interrupts fail to be detected. We workaround the 1679e705c121SKalle Valo * hardware bugs here by ACKing all the possible interrupts so that 1680e705c121SKalle Valo * interrupt coalescing can still be achieved. 1681e705c121SKalle Valo */ 1682e705c121SKalle Valo iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1683e705c121SKalle Valo 1684e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) 1685e705c121SKalle Valo IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1686e705c121SKalle Valo inta, iwl_read32(trans, CSR_INT_MASK)); 1687e705c121SKalle Valo 1688e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1689e705c121SKalle Valo 1690e705c121SKalle Valo /* Now service all interrupt bits discovered above. */ 1691e705c121SKalle Valo if (inta & CSR_INT_BIT_HW_ERR) { 1692e705c121SKalle Valo IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); 1693e705c121SKalle Valo 1694e705c121SKalle Valo /* Tell the device to stop sending interrupts */ 1695e705c121SKalle Valo iwl_disable_interrupts(trans); 1696e705c121SKalle Valo 1697e705c121SKalle Valo isr_stats->hw++; 1698e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1699e705c121SKalle Valo 1700e705c121SKalle Valo handled |= CSR_INT_BIT_HW_ERR; 1701e705c121SKalle Valo 1702e705c121SKalle Valo goto out; 1703e705c121SKalle Valo } 1704e705c121SKalle Valo 1705e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) { 1706e705c121SKalle Valo /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1707e705c121SKalle Valo if (inta & CSR_INT_BIT_SCD) { 1708e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1709e705c121SKalle Valo "Scheduler finished to transmit the frame/frames.\n"); 1710e705c121SKalle Valo isr_stats->sch++; 1711e705c121SKalle Valo } 1712e705c121SKalle Valo 1713e705c121SKalle Valo /* Alive notification via Rx interrupt will do the real work */ 1714e705c121SKalle Valo if (inta & CSR_INT_BIT_ALIVE) { 1715e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1716e705c121SKalle Valo isr_stats->alive++; 1717eda50cdeSSara Sharon if (trans->cfg->gen2) { 1718eda50cdeSSara Sharon /* 1719eda50cdeSSara Sharon * We can restock, since firmware configured 1720eda50cdeSSara Sharon * the RFH 1721eda50cdeSSara Sharon */ 1722eda50cdeSSara Sharon iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 1723eda50cdeSSara Sharon } 1724e705c121SKalle Valo } 1725e705c121SKalle Valo } 1726e705c121SKalle Valo 1727e705c121SKalle Valo /* Safely ignore these bits for debug checks below */ 1728e705c121SKalle Valo inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1729e705c121SKalle Valo 1730e705c121SKalle Valo /* HW RF KILL switch toggled */ 1731e705c121SKalle Valo if (inta & CSR_INT_BIT_RF_KILL) { 17323a6e168bSJohannes Berg iwl_pcie_handle_rfkill_irq(trans); 1733e705c121SKalle Valo handled |= CSR_INT_BIT_RF_KILL; 1734e705c121SKalle Valo } 1735e705c121SKalle Valo 1736e705c121SKalle Valo /* Chip got too hot and stopped itself */ 1737e705c121SKalle Valo if (inta & CSR_INT_BIT_CT_KILL) { 1738e705c121SKalle Valo IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1739e705c121SKalle Valo isr_stats->ctkill++; 1740e705c121SKalle Valo handled |= CSR_INT_BIT_CT_KILL; 1741e705c121SKalle Valo } 1742e705c121SKalle Valo 1743e705c121SKalle Valo /* Error detected by uCode */ 1744e705c121SKalle Valo if (inta & CSR_INT_BIT_SW_ERR) { 1745e705c121SKalle Valo IWL_ERR(trans, "Microcode SW error detected. 
" 1746e705c121SKalle Valo " Restarting 0x%X.\n", inta); 1747e705c121SKalle Valo isr_stats->sw++; 1748e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1749e705c121SKalle Valo handled |= CSR_INT_BIT_SW_ERR; 1750e705c121SKalle Valo } 1751e705c121SKalle Valo 1752e705c121SKalle Valo /* uCode wakes up after power-down sleep */ 1753e705c121SKalle Valo if (inta & CSR_INT_BIT_WAKEUP) { 1754e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1755e705c121SKalle Valo iwl_pcie_rxq_check_wrptr(trans); 1756e705c121SKalle Valo iwl_pcie_txq_check_wrptrs(trans); 1757e705c121SKalle Valo 1758e705c121SKalle Valo isr_stats->wakeup++; 1759e705c121SKalle Valo 1760e705c121SKalle Valo handled |= CSR_INT_BIT_WAKEUP; 1761e705c121SKalle Valo } 1762e705c121SKalle Valo 1763e705c121SKalle Valo /* All uCode command responses, including Tx command responses, 1764e705c121SKalle Valo * Rx "responses" (frame-received notification), and other 1765e705c121SKalle Valo * notifications from uCode come through here*/ 1766e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1767e705c121SKalle Valo CSR_INT_BIT_RX_PERIODIC)) { 1768e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1769e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1770e705c121SKalle Valo handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1771e705c121SKalle Valo iwl_write32(trans, CSR_FH_INT_STATUS, 1772e705c121SKalle Valo CSR_FH_INT_RX_MASK); 1773e705c121SKalle Valo } 1774e705c121SKalle Valo if (inta & CSR_INT_BIT_RX_PERIODIC) { 1775e705c121SKalle Valo handled |= CSR_INT_BIT_RX_PERIODIC; 1776e705c121SKalle Valo iwl_write32(trans, 1777e705c121SKalle Valo CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1778e705c121SKalle Valo } 1779e705c121SKalle Valo /* Sending RX interrupt require many steps to be done in the 1780e705c121SKalle Valo * the device: 1781e705c121SKalle Valo * 1- write interrupt to current index in ICT table. 1782e705c121SKalle Valo * 2- dma RX frame. 1783e705c121SKalle Valo * 3- update RX shared data to indicate last write index. 1784e705c121SKalle Valo * 4- send interrupt. 1785e705c121SKalle Valo * This could lead to RX race, driver could receive RX interrupt 1786e705c121SKalle Valo * but the shared data changes does not reflect this; 1787e705c121SKalle Valo * periodic interrupt will detect any dangling Rx activity. 1788e705c121SKalle Valo */ 1789e705c121SKalle Valo 1790e705c121SKalle Valo /* Disable periodic interrupt; we use it as just a one-shot. */ 1791e705c121SKalle Valo iwl_write8(trans, CSR_INT_PERIODIC_REG, 1792e705c121SKalle Valo CSR_INT_PERIODIC_DIS); 1793e705c121SKalle Valo 1794e705c121SKalle Valo /* 1795e705c121SKalle Valo * Enable periodic interrupt in 8 msec only if we received 1796e705c121SKalle Valo * real RX interrupt (instead of just periodic int), to catch 1797e705c121SKalle Valo * any dangling Rx interrupt. If it was just the periodic 1798e705c121SKalle Valo * interrupt, there was no dangling Rx activity, and no need 1799e705c121SKalle Valo * to extend the periodic interrupt; one-shot is enough. 
1800e705c121SKalle Valo 		 */
1801e705c121SKalle Valo 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1802e705c121SKalle Valo 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
1803e705c121SKalle Valo 				   CSR_INT_PERIODIC_ENA);
1804e705c121SKalle Valo 
1805e705c121SKalle Valo 		isr_stats->rx++;
1806e705c121SKalle Valo 
1807e705c121SKalle Valo 		local_bh_disable();
18082e5d4a8fSHaim Dreyfuss 		iwl_pcie_rx_handle(trans, 0);
1809e705c121SKalle Valo 		local_bh_enable();
1810e705c121SKalle Valo 	}
1811e705c121SKalle Valo 
1812e705c121SKalle Valo 	/* This "Tx" DMA channel is used only for loading uCode */
1813e705c121SKalle Valo 	if (inta & CSR_INT_BIT_FH_TX) {
1814e705c121SKalle Valo 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1815e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1816e705c121SKalle Valo 		isr_stats->tx++;
1817e705c121SKalle Valo 		handled |= CSR_INT_BIT_FH_TX;
1818e705c121SKalle Valo 		/* Wake up uCode load routine, now that load is complete */
1819e705c121SKalle Valo 		trans_pcie->ucode_write_complete = true;
1820e705c121SKalle Valo 		wake_up(&trans_pcie->ucode_write_waitq);
1821e705c121SKalle Valo 	}
1822e705c121SKalle Valo 
1823e705c121SKalle Valo 	if (inta & ~handled) {
1824e705c121SKalle Valo 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1825e705c121SKalle Valo 		isr_stats->unhandled++;
1826e705c121SKalle Valo 	}
1827e705c121SKalle Valo 
1828e705c121SKalle Valo 	if (inta & ~(trans_pcie->inta_mask)) {
1829e705c121SKalle Valo 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1830e705c121SKalle Valo 			 inta & ~trans_pcie->inta_mask);
1831e705c121SKalle Valo 	}
1832e705c121SKalle Valo 
1833f16c3ebfSEmmanuel Grumbach 	spin_lock(&trans_pcie->irq_lock);
1834a6bd005fSEmmanuel Grumbach 	/* Only re-enable all interrupts if they were disabled by the irq */
1835f16c3ebfSEmmanuel Grumbach 	if (test_bit(STATUS_INT_ENABLED, &trans->status))
1836f16c3ebfSEmmanuel Grumbach 		_iwl_enable_interrupts(trans);
1837f16c3ebfSEmmanuel Grumbach 	/* we are loading the firmware, enable FH_TX interrupt only */
1838f16c3ebfSEmmanuel Grumbach 	else if (handled & CSR_INT_BIT_FH_TX)
1839f16c3ebfSEmmanuel Grumbach 		iwl_enable_fw_load_int(trans);
1840e705c121SKalle Valo 	/* Re-enable RF_KILL if it occurred */
1841e705c121SKalle Valo 	else if (handled & CSR_INT_BIT_RF_KILL)
1842e705c121SKalle Valo 		iwl_enable_rfkill_int(trans);
1843f16c3ebfSEmmanuel Grumbach 	spin_unlock(&trans_pcie->irq_lock);
1844e705c121SKalle Valo 
1845e705c121SKalle Valo  out:
1846e705c121SKalle Valo 	lock_map_release(&trans->sync_cmd_lockdep_map);
1847e705c121SKalle Valo 	return IRQ_HANDLED;
1848e705c121SKalle Valo }
1849e705c121SKalle Valo 
1850e705c121SKalle Valo /******************************************************************************
1851e705c121SKalle Valo  *
1852e705c121SKalle Valo  * ICT functions
1853e705c121SKalle Valo  *
1854e705c121SKalle Valo  ******************************************************************************/
1855e705c121SKalle Valo 
1856e705c121SKalle Valo /* Free dram table */
1857e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans)
1858e705c121SKalle Valo {
1859e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1860e705c121SKalle Valo 
1861e705c121SKalle Valo 	if (trans_pcie->ict_tbl) {
1862e705c121SKalle Valo 		dma_free_coherent(trans->dev, ICT_SIZE,
1863e705c121SKalle Valo 				  trans_pcie->ict_tbl,
1864e705c121SKalle Valo 				  trans_pcie->ict_tbl_dma);
1865e705c121SKalle Valo 		trans_pcie->ict_tbl = NULL;
1866e705c121SKalle Valo 		trans_pcie->ict_tbl_dma = 0;
1867e705c121SKalle Valo 	}
1868e705c121SKalle Valo }
1869e705c121SKalle Valo 
1870e705c121SKalle Valo /*
1871e705c121SKalle Valo  * Allocate the shared DRAM table; it is an aligned memory
1872e705c121SKalle Valo  * block of ICT_SIZE.
1873e705c121SKalle Valo  * Also reset all data related to the ICT table interrupt.
1874e705c121SKalle Valo  */
1875e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1876e705c121SKalle Valo {
1877e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1878e705c121SKalle Valo 
1879e705c121SKalle Valo 	trans_pcie->ict_tbl =
1880e705c121SKalle Valo 		dma_zalloc_coherent(trans->dev, ICT_SIZE,
1881e705c121SKalle Valo 				   &trans_pcie->ict_tbl_dma,
1882e705c121SKalle Valo 				   GFP_KERNEL);
1883e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1884e705c121SKalle Valo 		return -ENOMEM;
1885e705c121SKalle Valo 
1886e705c121SKalle Valo 	/* just an API sanity check ... it is guaranteed to be aligned */
1887e705c121SKalle Valo 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1888e705c121SKalle Valo 		iwl_pcie_free_ict(trans);
1889e705c121SKalle Valo 		return -EINVAL;
1890e705c121SKalle Valo 	}
1891e705c121SKalle Valo 
1892e705c121SKalle Valo 	return 0;
1893e705c121SKalle Valo }
1894e705c121SKalle Valo 
1895e705c121SKalle Valo /* The device is going up; inform it about using the ICT interrupt table,
1896e705c121SKalle Valo  * and tell the driver to start using the ICT interrupt.
1897e705c121SKalle Valo  */
1898e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans)
1899e705c121SKalle Valo {
1900e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1901e705c121SKalle Valo 	u32 val;
1902e705c121SKalle Valo 
1903e705c121SKalle Valo 	if (!trans_pcie->ict_tbl)
1904e705c121SKalle Valo 		return;
1905e705c121SKalle Valo 
1906e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1907f16c3ebfSEmmanuel Grumbach 	_iwl_disable_interrupts(trans);
1908e705c121SKalle Valo 
1909e705c121SKalle Valo 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1910e705c121SKalle Valo 
1911e705c121SKalle Valo 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
1912e705c121SKalle Valo 
1913e705c121SKalle Valo 	val |= CSR_DRAM_INT_TBL_ENABLE |
1914e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
1915e705c121SKalle Valo 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
1916e705c121SKalle Valo 
1917e705c121SKalle Valo 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
1918e705c121SKalle Valo 
1919e705c121SKalle Valo 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1920e705c121SKalle Valo 	trans_pcie->use_ict = true;
1921e705c121SKalle Valo 	trans_pcie->ict_index = 0;
1922e705c121SKalle Valo 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1923f16c3ebfSEmmanuel Grumbach 	_iwl_enable_interrupts(trans);
1924e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1925e705c121SKalle Valo }
1926e705c121SKalle Valo 
1927e705c121SKalle Valo /* The device is going down; disable ICT interrupt usage */
1928e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans)
1929e705c121SKalle Valo {
1930e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1931e705c121SKalle Valo 
1932e705c121SKalle Valo 	spin_lock(&trans_pcie->irq_lock);
1933e705c121SKalle Valo 	trans_pcie->use_ict = false;
1934e705c121SKalle Valo 	spin_unlock(&trans_pcie->irq_lock);
1935e705c121SKalle Valo }
1936e705c121SKalle Valo 
1937e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data)
1938e705c121SKalle Valo {
1939e705c121SKalle Valo 	struct iwl_trans *trans = data;
1940e705c121SKalle Valo 
1941e705c121SKalle Valo 
	if (!trans)
1942e705c121SKalle Valo 		return IRQ_NONE;
1943e705c121SKalle Valo 
1944e705c121SKalle Valo 	/* Disable (but don't clear!) interrupts here to avoid
1945e705c121SKalle Valo 	 * back-to-back ISRs and sporadic interrupts from our NIC.
1946e705c121SKalle Valo 	 * If we have something to service, the tasklet will re-enable ints.
1947e705c121SKalle Valo 	 * If we *don't* have something, we'll re-enable before leaving here.
1948e705c121SKalle Valo 	 */
1949e705c121SKalle Valo 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1950e705c121SKalle Valo 
1951e705c121SKalle Valo 	return IRQ_WAKE_THREAD;
1952e705c121SKalle Valo }
19532e5d4a8fSHaim Dreyfuss 
19542e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
19552e5d4a8fSHaim Dreyfuss {
19562e5d4a8fSHaim Dreyfuss 	return IRQ_WAKE_THREAD;
19572e5d4a8fSHaim Dreyfuss }
19582e5d4a8fSHaim Dreyfuss 
19592e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
19602e5d4a8fSHaim Dreyfuss {
19612e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
19622e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
19632e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
196446167a8fSColin Ian King 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
19652e5d4a8fSHaim Dreyfuss 	u32 inta_fh, inta_hw;
19662e5d4a8fSHaim Dreyfuss 
19672e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
19682e5d4a8fSHaim Dreyfuss 
19692e5d4a8fSHaim Dreyfuss 	spin_lock(&trans_pcie->irq_lock);
19707ef3dd26SHaim Dreyfuss 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
19717ef3dd26SHaim Dreyfuss 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
19722e5d4a8fSHaim Dreyfuss 	/*
19732e5d4a8fSHaim Dreyfuss 	 * Clear the causes registers to avoid handling the same cause again.
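	 * The values just read are written straight back: if inta_fh read as
	 * 0x00010001, for instance, writing 0x00010001 to
	 * CSR_MSIX_FH_INT_CAUSES_AD clears exactly those two cause bits.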
19742e5d4a8fSHaim Dreyfuss */ 19757ef3dd26SHaim Dreyfuss iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); 19767ef3dd26SHaim Dreyfuss iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); 19772e5d4a8fSHaim Dreyfuss spin_unlock(&trans_pcie->irq_lock); 19782e5d4a8fSHaim Dreyfuss 1979c42ff65dSJohannes Berg trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); 1980c42ff65dSJohannes Berg 19812e5d4a8fSHaim Dreyfuss if (unlikely(!(inta_fh | inta_hw))) { 19822e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 19832e5d4a8fSHaim Dreyfuss lock_map_release(&trans->sync_cmd_lockdep_map); 19842e5d4a8fSHaim Dreyfuss return IRQ_NONE; 19852e5d4a8fSHaim Dreyfuss } 19862e5d4a8fSHaim Dreyfuss 19872e5d4a8fSHaim Dreyfuss if (iwl_have_debug_level(IWL_DL_ISR)) 19882e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", 19892e5d4a8fSHaim Dreyfuss inta_fh, 19902e5d4a8fSHaim Dreyfuss iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); 19912e5d4a8fSHaim Dreyfuss 1992496d83caSHaim Dreyfuss if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && 1993496d83caSHaim Dreyfuss inta_fh & MSIX_FH_INT_CAUSES_Q0) { 1994496d83caSHaim Dreyfuss local_bh_disable(); 1995496d83caSHaim Dreyfuss iwl_pcie_rx_handle(trans, 0); 1996496d83caSHaim Dreyfuss local_bh_enable(); 1997496d83caSHaim Dreyfuss } 1998496d83caSHaim Dreyfuss 1999496d83caSHaim Dreyfuss if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && 2000496d83caSHaim Dreyfuss inta_fh & MSIX_FH_INT_CAUSES_Q1) { 2001496d83caSHaim Dreyfuss local_bh_disable(); 2002496d83caSHaim Dreyfuss iwl_pcie_rx_handle(trans, 1); 2003496d83caSHaim Dreyfuss local_bh_enable(); 2004496d83caSHaim Dreyfuss } 2005496d83caSHaim Dreyfuss 20062e5d4a8fSHaim Dreyfuss /* This "Tx" DMA channel is used only for loading uCode */ 20072e5d4a8fSHaim Dreyfuss if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { 20082e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 20092e5d4a8fSHaim Dreyfuss isr_stats->tx++; 20102e5d4a8fSHaim Dreyfuss /* 20112e5d4a8fSHaim Dreyfuss * Wake up uCode load routine, 20122e5d4a8fSHaim Dreyfuss * now that load is complete 20132e5d4a8fSHaim Dreyfuss */ 20142e5d4a8fSHaim Dreyfuss trans_pcie->ucode_write_complete = true; 20152e5d4a8fSHaim Dreyfuss wake_up(&trans_pcie->ucode_write_waitq); 20162e5d4a8fSHaim Dreyfuss } 20172e5d4a8fSHaim Dreyfuss 20182e5d4a8fSHaim Dreyfuss /* Error detected by uCode */ 20192e5d4a8fSHaim Dreyfuss if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || 20209b58419eSGolan Ben Ami (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) || 20219b58419eSGolan Ben Ami (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) { 20222e5d4a8fSHaim Dreyfuss IWL_ERR(trans, 20232e5d4a8fSHaim Dreyfuss "Microcode SW error detected. 
Restarting 0x%X.\n", 20242e5d4a8fSHaim Dreyfuss inta_fh); 20252e5d4a8fSHaim Dreyfuss isr_stats->sw++; 20262e5d4a8fSHaim Dreyfuss iwl_pcie_irq_handle_error(trans); 20272e5d4a8fSHaim Dreyfuss } 20282e5d4a8fSHaim Dreyfuss 20292e5d4a8fSHaim Dreyfuss /* After checking FH register check HW register */ 20302e5d4a8fSHaim Dreyfuss if (iwl_have_debug_level(IWL_DL_ISR)) 20312e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, 20322e5d4a8fSHaim Dreyfuss "ISR inta_hw 0x%08x, enabled 0x%08x\n", 20332e5d4a8fSHaim Dreyfuss inta_hw, 20342e5d4a8fSHaim Dreyfuss iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); 20352e5d4a8fSHaim Dreyfuss 20362e5d4a8fSHaim Dreyfuss /* Alive notification via Rx interrupt will do the real work */ 20372e5d4a8fSHaim Dreyfuss if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { 20382e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 20392e5d4a8fSHaim Dreyfuss isr_stats->alive++; 2040eda50cdeSSara Sharon if (trans->cfg->gen2) { 2041eda50cdeSSara Sharon /* We can restock, since firmware configured the RFH */ 2042eda50cdeSSara Sharon iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 2043eda50cdeSSara Sharon } 20442e5d4a8fSHaim Dreyfuss } 20452e5d4a8fSHaim Dreyfuss 20469b58419eSGolan Ben Ami if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 && 20479b58419eSGolan Ben Ami inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) { 20489b58419eSGolan Ben Ami /* Reflect IML transfer status */ 20499b58419eSGolan Ben Ami int res = iwl_read32(trans, CSR_IML_RESP_ADDR); 20509b58419eSGolan Ben Ami 20519b58419eSGolan Ben Ami IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); 20529b58419eSGolan Ben Ami if (res == IWL_IMAGE_RESP_FAIL) { 20539b58419eSGolan Ben Ami isr_stats->sw++; 20549b58419eSGolan Ben Ami iwl_pcie_irq_handle_error(trans); 20559b58419eSGolan Ben Ami } 20569b58419eSGolan Ben Ami } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { 20572e5d4a8fSHaim Dreyfuss /* uCode wakes up after power-down sleep */ 20582e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 20592e5d4a8fSHaim Dreyfuss iwl_pcie_rxq_check_wrptr(trans); 20602e5d4a8fSHaim Dreyfuss iwl_pcie_txq_check_wrptrs(trans); 20612e5d4a8fSHaim Dreyfuss 20622e5d4a8fSHaim Dreyfuss isr_stats->wakeup++; 20632e5d4a8fSHaim Dreyfuss } 20642e5d4a8fSHaim Dreyfuss 20652e5d4a8fSHaim Dreyfuss /* Chip got too hot and stopped itself */ 20662e5d4a8fSHaim Dreyfuss if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { 20672e5d4a8fSHaim Dreyfuss IWL_ERR(trans, "Microcode CT kill error detected.\n"); 20682e5d4a8fSHaim Dreyfuss isr_stats->ctkill++; 20692e5d4a8fSHaim Dreyfuss } 20702e5d4a8fSHaim Dreyfuss 20712e5d4a8fSHaim Dreyfuss /* HW RF KILL switch toggled */ 20723a6e168bSJohannes Berg if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) 20733a6e168bSJohannes Berg iwl_pcie_handle_rfkill_irq(trans); 20742e5d4a8fSHaim Dreyfuss 20752e5d4a8fSHaim Dreyfuss if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { 20762e5d4a8fSHaim Dreyfuss IWL_ERR(trans, 20772e5d4a8fSHaim Dreyfuss "Hardware error detected. Restarting.\n"); 20782e5d4a8fSHaim Dreyfuss 20792e5d4a8fSHaim Dreyfuss isr_stats->hw++; 20802e5d4a8fSHaim Dreyfuss iwl_pcie_irq_handle_error(trans); 20812e5d4a8fSHaim Dreyfuss } 20822e5d4a8fSHaim Dreyfuss 20832e5d4a8fSHaim Dreyfuss iwl_pcie_clear_irq(trans, entry); 20842e5d4a8fSHaim Dreyfuss 20852e5d4a8fSHaim Dreyfuss lock_map_release(&trans->sync_cmd_lockdep_map); 20862e5d4a8fSHaim Dreyfuss 20872e5d4a8fSHaim Dreyfuss return IRQ_HANDLED; 20882e5d4a8fSHaim Dreyfuss } 2089