/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator keeps an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs, and there is no guarantee when the
 *   other 6 RBDs will be supplied). The queues recycle the remaining RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
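/*
 * Illustrative sketch, not part of the driver: the ring-index arithmetic
 * described above, under the assumption of a power-of-two ring size.
 * Reserving one slot keeps "full" (WRITE == READ) and "empty"
 * (WRITE == READ - 1, mod size) distinguishable. The subtraction may wrap
 * below zero, but masking with (size - 1) makes the result correct anyway;
 * iwl_rxq_space() below uses exactly this trick. Names here are
 * hypothetical, chosen only for the example.
 */
#if 0	/* example only, never compiled */
static u32 example_ring_free_slots(u32 read, u32 write, u32 size)
{
	/* size must be a power of 2 */
	return (read - write - 1) & (size - 1);
}
/*
 * e.g. size = 256:
 *   read = 10, write =  9  ->  (10 -  9 - 1) & 255 ==   0 free slots
 *   read = 10, write = 10  ->  (10 - 10 - 1) & 255 == 255 free slots
 */
#endif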
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->trans_cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->trans_cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->trans_cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}
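/*
 * Illustrative sketch, not part of the driver: the pre-AX210 multi-queue
 * BD packing done by iwl_pcie_restock_bd() above. Receive buffers are at
 * least 4 KiB aligned, so the low 12 bits of the DMA address are zero
 * (the WARN_ON with DMA_BIT_MASK(12) in the restock path checks this),
 * which leaves room to OR the virtual buffer ID into those bits and mask
 * it back out on completion. Names below are hypothetical.
 */
#if 0	/* example only, never compiled */
static u64 example_pack_bd(u64 page_dma, u16 vid)
{
	/* page_dma assumed 4 KiB aligned, vid assumed to fit in 12 bits */
	return page_dma | vid;
}

static u16 example_unpack_vid(u64 bd)
{
	/* the low 12 bits hold the virtual buffer ID */
	return bd & 0xfff;
}
#endif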
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* the first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
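/*
 * Illustrative sketch, not part of the driver: the counter handshake
 * between the Rx path and iwl_pcie_rx_allocator() above. The Rx path
 * increments req_pending and schedules the worker; the worker turns each
 * pending request into a batch of mapped pages and increments req_ready;
 * iwl_pcie_rx_allocator_get() below then claims one completed request at
 * a time with atomic_dec_if_positive(), which only decrements when the
 * result stays non-negative. Names below are hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_post_request(atomic_t *req_pending,
				 struct workqueue_struct *wq,
				 struct work_struct *work)
{
	atomic_inc(req_pending);	/* producer: one more request */
	queue_work(wq, work);		/* worker will satisfy it */
}

static bool example_claim_request(atomic_t *req_ready)
{
	/* consumer: succeeds only if a request has been completed */
	return atomic_dec_if_positive(req_ready) >= 0;
}
#endif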
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
			      sizeof(struct iwl_rb_status);

	spin_lock_init(&rxq->lock);
	if (trans->trans_cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->trans_cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  (use_rx_td ? sizeof(*rxq->cd) :
						   sizeof(__le32)) * rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->tr_tail_dma, GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->cr_tail_dma, GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}

int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;
	size_t rb_stts_size = trans->trans_cfg->device_family >=
			      IWL_DEVICE_FAMILY_AX210 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues continuously (HW requirement).
	 */
	trans_pcie->base_rb_stts =
		dma_alloc_coherent(trans->dev,
				   rb_stts_size * trans->num_rx_queues,
				   &trans_pcie->base_rb_stts_dma,
				   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rxq);

	return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->trans_cfg->device_family >=
			IWL_DEVICE_FAMILY_AX210) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->trans_cfg->mq_rx_supported ?
			MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->trans_cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
1137eda50cdeSSara Sharon */ 1138eda50cdeSSara Sharon return _iwl_pcie_rx_init(trans); 1139eda50cdeSSara Sharon } 1140eda50cdeSSara Sharon 1141e705c121SKalle Valo void iwl_pcie_rx_free(struct iwl_trans *trans) 1142e705c121SKalle Valo { 1143e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1144e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 114578485054SSara Sharon int i; 1146286ca8ebSLuca Coelho size_t rb_stts_size = trans->trans_cfg->device_family >= 11473681021fSJohannes Berg IWL_DEVICE_FAMILY_AX210 ? 11486cc6ba3aSTriebitz sizeof(__le16) : sizeof(struct iwl_rb_status); 1149e705c121SKalle Valo 115078485054SSara Sharon /* 115178485054SSara Sharon * if rxq is NULL, it means that nothing has been allocated, 115278485054SSara Sharon * exit now 115378485054SSara Sharon */ 115478485054SSara Sharon if (!trans_pcie->rxq) { 1155e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); 1156e705c121SKalle Valo return; 1157e705c121SKalle Valo } 1158e705c121SKalle Valo 1159e705c121SKalle Valo cancel_work_sync(&rba->rx_alloc); 1160e705c121SKalle Valo 116178485054SSara Sharon iwl_pcie_free_rbs_pool(trans); 1162e705c121SKalle Valo 11636cc6ba3aSTriebitz if (trans_pcie->base_rb_stts) { 11646cc6ba3aSTriebitz dma_free_coherent(trans->dev, 11656cc6ba3aSTriebitz rb_stts_size * trans->num_rx_queues, 11666cc6ba3aSTriebitz trans_pcie->base_rb_stts, 11676cc6ba3aSTriebitz trans_pcie->base_rb_stts_dma); 11686cc6ba3aSTriebitz trans_pcie->base_rb_stts = NULL; 11696cc6ba3aSTriebitz trans_pcie->base_rb_stts_dma = 0; 11706cc6ba3aSTriebitz } 11716cc6ba3aSTriebitz 117278485054SSara Sharon for (i = 0; i < trans->num_rx_queues; i++) { 117378485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 117478485054SSara Sharon 11751b493e30SGolan Ben Ami iwl_pcie_free_rxq_dma(trans, rxq); 1176bce97731SSara Sharon 1177bce97731SSara Sharon if (rxq->napi.poll) 1178bce97731SSara Sharon netif_napi_del(&rxq->napi); 117996a6497bSSara Sharon } 118078485054SSara Sharon kfree(trans_pcie->rxq); 1181e705c121SKalle Valo } 1182e705c121SKalle Valo 1183868a1e86SShaul Triebitz static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, 1184868a1e86SShaul Triebitz struct iwl_rb_allocator *rba) 1185868a1e86SShaul Triebitz { 1186868a1e86SShaul Triebitz spin_lock(&rba->lock); 1187868a1e86SShaul Triebitz list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1188868a1e86SShaul Triebitz spin_unlock(&rba->lock); 1189868a1e86SShaul Triebitz } 1190868a1e86SShaul Triebitz 1191e705c121SKalle Valo /* 1192e705c121SKalle Valo * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 1193e705c121SKalle Valo * 1194e705c121SKalle Valo * Called when a RBD can be reused. The RBD is transferred to the allocator. 
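 * (Note: handing RBDs over in batches, rather than one by one, means
 * the allocator lock and its workqueue are only touched once per
 * RX_POST_REQ_ALLOC reused buffers - see the used_count bookkeeping
 * below.)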
1195e705c121SKalle Valo  * When there are 2 empty RBDs, a request for allocation is posted.
1196e705c121SKalle Valo  */
1197e705c121SKalle Valo static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1198e705c121SKalle Valo 				  struct iwl_rx_mem_buffer *rxb,
1199e705c121SKalle Valo 				  struct iwl_rxq *rxq, bool emergency)
1200e705c121SKalle Valo {
1201e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1202e705c121SKalle Valo 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1203e705c121SKalle Valo 
1204e705c121SKalle Valo 	/* Move the RBD to the used list; it will be moved to the allocator
1205e705c121SKalle Valo 	 * in batches before claiming or posting a request */
1206e705c121SKalle Valo 	list_add_tail(&rxb->list, &rxq->rx_used);
1207e705c121SKalle Valo 
1208e705c121SKalle Valo 	if (unlikely(emergency))
1209e705c121SKalle Valo 		return;
1210e705c121SKalle Valo 
1211e705c121SKalle Valo 	/* Count the allocator owned RBDs */
1212e705c121SKalle Valo 	rxq->used_count++;
1213e705c121SKalle Valo 
1214e705c121SKalle Valo 	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
1215e705c121SKalle Valo 	 * issue a request for the allocator. The modulo on RX_CLAIM_REQ_ALLOC
1216e705c121SKalle Valo 	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
1217e705c121SKalle Valo 	 * buffers earlier but still need to post another request.
1218e705c121SKalle Valo 	 */
1219e705c121SKalle Valo 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1220e705c121SKalle Valo 		/* Move the 2 RBDs to the allocator's ownership.
1221e705c121SKalle Valo 		   The allocator has another 6 from the pool for the request completion */
1222868a1e86SShaul Triebitz 		iwl_pcie_rx_move_to_allocator(rxq, rba);
1223e705c121SKalle Valo 
1224e705c121SKalle Valo 		atomic_inc(&rba->req_pending);
1225e705c121SKalle Valo 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1226e705c121SKalle Valo 	}
1227e705c121SKalle Valo }
1228e705c121SKalle Valo 
1229e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
123078485054SSara Sharon 				  struct iwl_rxq *rxq,
1231e705c121SKalle Valo 				  struct iwl_rx_mem_buffer *rxb,
12327891965dSSara Sharon 				  bool emergency,
12337891965dSSara Sharon 				  int i)
1234e705c121SKalle Valo {
1235e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1236b2a3b1c1SSara Sharon 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1237e705c121SKalle Valo 	bool page_stolen = false;
1238e705c121SKalle Valo 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1239e705c121SKalle Valo 	u32 offset = 0;
1240e705c121SKalle Valo 
1241e705c121SKalle Valo 	if (WARN_ON(!rxb))
1242e705c121SKalle Valo 		return;
1243e705c121SKalle Valo 
1244e705c121SKalle Valo 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1245e705c121SKalle Valo 
1246e705c121SKalle Valo 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1247e705c121SKalle Valo 		struct iwl_rx_packet *pkt;
1248e705c121SKalle Valo 		u16 sequence;
1249e705c121SKalle Valo 		bool reclaim;
1250e705c121SKalle Valo 		int index, cmd_index, len;
1251e705c121SKalle Valo 		struct iwl_rx_cmd_buffer rxcb = {
1252e705c121SKalle Valo 			._offset = offset,
1253e705c121SKalle Valo 			._rx_page_order = trans_pcie->rx_page_order,
1254e705c121SKalle Valo 			._page = rxb->page,
1255e705c121SKalle Valo 			._page_stolen = false,
1256e705c121SKalle Valo 			.truesize = max_len,
1257e705c121SKalle Valo 		};
1258e705c121SKalle Valo 
1259e705c121SKalle Valo 		pkt = rxb_addr(&rxcb);
1260e705c121SKalle Valo 
12613bfdee76SJohannes Berg 		if (pkt->len_n_flags ==
cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { 12623bfdee76SJohannes Berg IWL_DEBUG_RX(trans, 12633bfdee76SJohannes Berg "Q %d: RB end marker at offset %d\n", 12643bfdee76SJohannes Berg rxq->id, offset); 1265e705c121SKalle Valo break; 12663bfdee76SJohannes Berg } 1267e705c121SKalle Valo 1268a395058eSJohannes Berg WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1269a395058eSJohannes Berg FH_RSCSR_RXQ_POS != rxq->id, 1270a395058eSJohannes Berg "frame on invalid queue - is on %d and indicates %d\n", 1271a395058eSJohannes Berg rxq->id, 1272a395058eSJohannes Berg (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1273a395058eSJohannes Berg FH_RSCSR_RXQ_POS); 1274ab2e696bSSara Sharon 1275e705c121SKalle Valo IWL_DEBUG_RX(trans, 12763bfdee76SJohannes Berg "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", 12773bfdee76SJohannes Berg rxq->id, offset, 127839bdb17eSSharon Dvir iwl_get_cmd_string(trans, 127939bdb17eSSharon Dvir iwl_cmd_id(pkt->hdr.cmd, 128039bdb17eSSharon Dvir pkt->hdr.group_id, 128139bdb17eSSharon Dvir 0)), 128235177c99SSara Sharon pkt->hdr.group_id, pkt->hdr.cmd, 128335177c99SSara Sharon le16_to_cpu(pkt->hdr.sequence)); 1284e705c121SKalle Valo 1285e705c121SKalle Valo len = iwl_rx_packet_len(pkt); 1286e705c121SKalle Valo len += sizeof(u32); /* account for status word */ 1287e705c121SKalle Valo trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 1288e705c121SKalle Valo trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 1289e705c121SKalle Valo 1290e705c121SKalle Valo /* Reclaim a command buffer only if this packet is a response 1291e705c121SKalle Valo * to a (driver-originated) command. 1292e705c121SKalle Valo * If the packet (e.g. Rx frame) originated from uCode, 1293e705c121SKalle Valo * there is no command buffer to reclaim. 1294e705c121SKalle Valo * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1295e705c121SKalle Valo * but apparently a few don't get set; catch them here. */ 1296e705c121SKalle Valo reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 1297d8a130b0SJohannes Berg if (reclaim && !pkt->hdr.group_id) { 1298e705c121SKalle Valo int i; 1299e705c121SKalle Valo 1300e705c121SKalle Valo for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 1301e705c121SKalle Valo if (trans_pcie->no_reclaim_cmds[i] == 1302e705c121SKalle Valo pkt->hdr.cmd) { 1303e705c121SKalle Valo reclaim = false; 1304e705c121SKalle Valo break; 1305e705c121SKalle Valo } 1306e705c121SKalle Valo } 1307e705c121SKalle Valo } 1308e705c121SKalle Valo 1309e705c121SKalle Valo sequence = le16_to_cpu(pkt->hdr.sequence); 1310e705c121SKalle Valo index = SEQ_TO_INDEX(sequence); 13114ecab561SEmmanuel Grumbach cmd_index = iwl_pcie_get_cmd_index(txq, index); 1312e705c121SKalle Valo 13139416560eSGolan Ben Ami if (rxq->id == trans_pcie->def_rx_queue) 1314bce97731SSara Sharon iwl_op_mode_rx(trans->op_mode, &rxq->napi, 1315bce97731SSara Sharon &rxcb); 1316bce97731SSara Sharon else 1317bce97731SSara Sharon iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 1318bce97731SSara Sharon &rxcb, rxq->id); 1319e705c121SKalle Valo 1320e705c121SKalle Valo if (reclaim) { 1321e705c121SKalle Valo kzfree(txq->entries[cmd_index].free_buf); 1322e705c121SKalle Valo txq->entries[cmd_index].free_buf = NULL; 1323e705c121SKalle Valo } 1324e705c121SKalle Valo 1325e705c121SKalle Valo /* 1326e705c121SKalle Valo * After here, we should always check rxcb._page_stolen, 1327e705c121SKalle Valo * if it is true then one of the handlers took the page. 
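 * (If the page was stolen, the opmode now holds its own reference;
 * our reference is dropped further down and the rxb is recycled
 * without a page attached.)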
1328e705c121SKalle Valo */ 1329e705c121SKalle Valo 1330e705c121SKalle Valo if (reclaim) { 1331e705c121SKalle Valo /* Invoke any callbacks, transfer the buffer to caller, 1332e705c121SKalle Valo * and fire off the (possibly) blocking 1333e705c121SKalle Valo * iwl_trans_send_cmd() 1334e705c121SKalle Valo * as we reclaim the driver command queue */ 1335e705c121SKalle Valo if (!rxcb._page_stolen) 1336e705c121SKalle Valo iwl_pcie_hcmd_complete(trans, &rxcb); 1337e705c121SKalle Valo else 1338e705c121SKalle Valo IWL_WARN(trans, "Claim null rxb?\n"); 1339e705c121SKalle Valo } 1340e705c121SKalle Valo 1341e705c121SKalle Valo page_stolen |= rxcb._page_stolen; 13423681021fSJohannes Berg if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 13430307c839SGolan Ben Ami break; 1344e705c121SKalle Valo offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1345e705c121SKalle Valo } 1346e705c121SKalle Valo 1347e705c121SKalle Valo /* page was stolen from us -- free our reference */ 1348e705c121SKalle Valo if (page_stolen) { 1349e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 1350e705c121SKalle Valo rxb->page = NULL; 1351e705c121SKalle Valo } 1352e705c121SKalle Valo 1353e705c121SKalle Valo /* Reuse the page if possible. For notification packets and 1354e705c121SKalle Valo * SKBs that fail to Rx correctly, add them back into the 1355e705c121SKalle Valo * rx_free list for reuse later. */ 1356e705c121SKalle Valo if (rxb->page != NULL) { 1357e705c121SKalle Valo rxb->page_dma = 1358e705c121SKalle Valo dma_map_page(trans->dev, rxb->page, 0, 1359e705c121SKalle Valo PAGE_SIZE << trans_pcie->rx_page_order, 1360e705c121SKalle Valo DMA_FROM_DEVICE); 1361e705c121SKalle Valo if (dma_mapping_error(trans->dev, rxb->page_dma)) { 1362e705c121SKalle Valo /* 1363e705c121SKalle Valo * free the page(s) as well to not break 1364e705c121SKalle Valo * the invariant that the items on the used 1365e705c121SKalle Valo * list have no page(s) 1366e705c121SKalle Valo */ 1367e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 1368e705c121SKalle Valo rxb->page = NULL; 1369e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1370e705c121SKalle Valo } else { 1371e705c121SKalle Valo list_add_tail(&rxb->list, &rxq->rx_free); 1372e705c121SKalle Valo rxq->free_count++; 1373e705c121SKalle Valo } 1374e705c121SKalle Valo } else 1375e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1376e705c121SKalle Valo } 1377e705c121SKalle Valo 13781b4bbe8bSSara Sharon static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, 13791b4bbe8bSSara Sharon struct iwl_rxq *rxq, int i) 13801b4bbe8bSSara Sharon { 13811b4bbe8bSSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 13821b4bbe8bSSara Sharon struct iwl_rx_mem_buffer *rxb; 13831b4bbe8bSSara Sharon u16 vid; 13841b4bbe8bSSara Sharon 1385f826faaaSJohannes Berg BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); 1386f826faaaSJohannes Berg 1387286ca8ebSLuca Coelho if (!trans->trans_cfg->mq_rx_supported) { 13881b4bbe8bSSara Sharon rxb = rxq->queue[i]; 13891b4bbe8bSSara Sharon rxq->queue[i] = NULL; 13901b4bbe8bSSara Sharon return rxb; 13911b4bbe8bSSara Sharon } 13921b4bbe8bSSara Sharon 13931b4bbe8bSSara Sharon /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */ 13943681021fSJohannes Berg if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 13951b4bbe8bSSara Sharon vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; 13961b4bbe8bSSara Sharon else 13971b4bbe8bSSara 
Sharon vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; 13981b4bbe8bSSara Sharon 13991b4bbe8bSSara Sharon if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) 14001b4bbe8bSSara Sharon goto out_err; 14011b4bbe8bSSara Sharon 14021b4bbe8bSSara Sharon rxb = trans_pcie->global_table[vid - 1]; 14031b4bbe8bSSara Sharon if (rxb->invalid) 14041b4bbe8bSSara Sharon goto out_err; 14051b4bbe8bSSara Sharon 140685d78bb1SSara Sharon IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); 140785d78bb1SSara Sharon 14081b4bbe8bSSara Sharon rxb->invalid = true; 14091b4bbe8bSSara Sharon 14101b4bbe8bSSara Sharon return rxb; 14111b4bbe8bSSara Sharon 14121b4bbe8bSSara Sharon out_err: 14131b4bbe8bSSara Sharon WARN(1, "Invalid rxb from HW %u\n", (u32)vid); 14141b4bbe8bSSara Sharon iwl_force_nmi(trans); 14151b4bbe8bSSara Sharon return NULL; 14161b4bbe8bSSara Sharon } 14171b4bbe8bSSara Sharon 1418e705c121SKalle Valo /* 1419e705c121SKalle Valo * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1420e705c121SKalle Valo */ 14212e5d4a8fSHaim Dreyfuss static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) 1422e705c121SKalle Valo { 1423e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 142430f24eabSJohannes Berg struct iwl_rxq *rxq; 1425d56daea4SSara Sharon u32 r, i, count = 0; 1426e705c121SKalle Valo bool emergency = false; 1427e705c121SKalle Valo 142830f24eabSJohannes Berg if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) 142930f24eabSJohannes Berg return; 143030f24eabSJohannes Berg 143130f24eabSJohannes Berg rxq = &trans_pcie->rxq[queue]; 143230f24eabSJohannes Berg 1433e705c121SKalle Valo restart: 1434e705c121SKalle Valo spin_lock(&rxq->lock); 1435e705c121SKalle Valo /* uCode's read index (stored in shared DRAM) indicates the last Rx 1436e705c121SKalle Valo * buffer that the driver may process (last buffer filled by ucode). */ 14370307c839SGolan Ben Ami r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 1438e705c121SKalle Valo i = rxq->read; 1439e705c121SKalle Valo 14405eae443eSSara Sharon /* W/A 9000 device step A0 wrap-around bug */ 14415eae443eSSara Sharon r &= (rxq->queue_size - 1); 14425eae443eSSara Sharon 1443e705c121SKalle Valo /* Rx interrupt, but nothing sent from uCode */ 1444e705c121SKalle Valo if (i == r) 14455eae443eSSara Sharon IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); 1446e705c121SKalle Valo 1447e705c121SKalle Valo while (i != r) { 1448868a1e86SShaul Triebitz struct iwl_rb_allocator *rba = &trans_pcie->rba; 1449e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb; 1450868a1e86SShaul Triebitz /* number of RBDs still waiting for page allocation */ 1451868a1e86SShaul Triebitz u32 rb_pending_alloc = 1452868a1e86SShaul Triebitz atomic_read(&trans_pcie->rba.req_pending) * 1453868a1e86SShaul Triebitz RX_CLAIM_REQ_ALLOC; 1454e705c121SKalle Valo 1455868a1e86SShaul Triebitz if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && 1456868a1e86SShaul Triebitz !emergency)) { 1457868a1e86SShaul Triebitz iwl_pcie_rx_move_to_allocator(rxq, rba); 1458e705c121SKalle Valo emergency = true; 14596dcdd165SSara Sharon IWL_DEBUG_TPT(trans, 14606dcdd165SSara Sharon "RX path is in emergency. 
Pending allocations %d\n", 14616dcdd165SSara Sharon rb_pending_alloc); 1462868a1e86SShaul Triebitz } 1463e705c121SKalle Valo 146485d78bb1SSara Sharon IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); 146585d78bb1SSara Sharon 14661b4bbe8bSSara Sharon rxb = iwl_pcie_get_rxb(trans, rxq, i); 14671b4bbe8bSSara Sharon if (!rxb) 14685eae443eSSara Sharon goto out; 1469e705c121SKalle Valo 14707891965dSSara Sharon iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); 1471e705c121SKalle Valo 147296a6497bSSara Sharon i = (i + 1) & (rxq->queue_size - 1); 1473e705c121SKalle Valo 1474d56daea4SSara Sharon /* 1475d56daea4SSara Sharon * If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1476d56daea4SSara Sharon * try to claim the pre-allocated buffers from the allocator. 1477d56daea4SSara Sharon * If not ready - will try to reclaim next time. 1478d56daea4SSara Sharon * There is no need to reschedule work - allocator exits only 1479d56daea4SSara Sharon * on success 1480e705c121SKalle Valo */ 1481d56daea4SSara Sharon if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) 1482d56daea4SSara Sharon iwl_pcie_rx_allocator_get(trans, rxq); 1483e705c121SKalle Valo 1484d56daea4SSara Sharon if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { 1485d56daea4SSara Sharon /* Add the remaining empty RBDs for allocator use */ 1486868a1e86SShaul Triebitz iwl_pcie_rx_move_to_allocator(rxq, rba); 1487d56daea4SSara Sharon } else if (emergency) { 1488e705c121SKalle Valo count++; 1489e705c121SKalle Valo if (count == 8) { 1490e705c121SKalle Valo count = 0; 14916dcdd165SSara Sharon if (rb_pending_alloc < rxq->queue_size / 3) { 14926dcdd165SSara Sharon IWL_DEBUG_TPT(trans, 14936dcdd165SSara Sharon "RX path exited emergency. Pending allocations %d\n", 14946dcdd165SSara Sharon rb_pending_alloc); 1495e705c121SKalle Valo emergency = false; 14966dcdd165SSara Sharon } 1497e0e168dcSGregory Greenman 1498e705c121SKalle Valo rxq->read = i; 1499e705c121SKalle Valo spin_unlock(&rxq->lock); 1500e0e168dcSGregory Greenman iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 150178485054SSara Sharon iwl_pcie_rxq_restock(trans, rxq); 1502e705c121SKalle Valo goto restart; 1503e705c121SKalle Valo } 1504e705c121SKalle Valo } 1505e0e168dcSGregory Greenman } 15065eae443eSSara Sharon out: 1507e705c121SKalle Valo /* Backtrack one entry */ 1508e705c121SKalle Valo rxq->read = i; 15090307c839SGolan Ben Ami /* update cr tail with the rxq read pointer */ 15103681021fSJohannes Berg if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 15110307c839SGolan Ben Ami *rxq->cr_tail = cpu_to_le16(r); 1512e705c121SKalle Valo spin_unlock(&rxq->lock); 1513e705c121SKalle Valo 1514e705c121SKalle Valo /* 1515e705c121SKalle Valo * handle a case where in emergency there are some unallocated RBDs. 1516e705c121SKalle Valo * those RBDs are in the used list, but are not tracked by the queue's 1517e705c121SKalle Valo * used_count which counts allocator owned RBDs. 1518e705c121SKalle Valo * unallocated emergency RBDs must be allocated on exit, otherwise 1519e705c121SKalle Valo * when called again the function may not be in emergency mode and 1520e705c121SKalle Valo * they will be handed to the allocator with no tracking in the RBD 1521e705c121SKalle Valo * allocator counters, which will lead to them never being claimed back 1522e705c121SKalle Valo * by the queue. 1523e705c121SKalle Valo * by allocating them here, they are now in the queue free list, and 1524e705c121SKalle Valo * will be restocked by the next call of iwl_pcie_rxq_restock. 
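 * (GFP_ATOMIC is used for this allocation since the Rx path runs in
 * BH/interrupt context and must not sleep.)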
1525e705c121SKalle Valo */ 1526e705c121SKalle Valo if (unlikely(emergency && count)) 152778485054SSara Sharon iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1528e705c121SKalle Valo 1529bce97731SSara Sharon if (rxq->napi.poll) 1530bce97731SSara Sharon napi_gro_flush(&rxq->napi, false); 1531e0e168dcSGregory Greenman 1532e0e168dcSGregory Greenman iwl_pcie_rxq_restock(trans, rxq); 1533e705c121SKalle Valo } 1534e705c121SKalle Valo 15352e5d4a8fSHaim Dreyfuss static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) 15362e5d4a8fSHaim Dreyfuss { 15372e5d4a8fSHaim Dreyfuss u8 queue = entry->entry; 15382e5d4a8fSHaim Dreyfuss struct msix_entry *entries = entry - queue; 15392e5d4a8fSHaim Dreyfuss 15402e5d4a8fSHaim Dreyfuss return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); 15412e5d4a8fSHaim Dreyfuss } 15422e5d4a8fSHaim Dreyfuss 15432e5d4a8fSHaim Dreyfuss /* 15442e5d4a8fSHaim Dreyfuss * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw 15452e5d4a8fSHaim Dreyfuss * This interrupt handler should be used with RSS queue only. 15462e5d4a8fSHaim Dreyfuss */ 15472e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) 15482e5d4a8fSHaim Dreyfuss { 15492e5d4a8fSHaim Dreyfuss struct msix_entry *entry = dev_id; 15502e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 15512e5d4a8fSHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans; 15522e5d4a8fSHaim Dreyfuss 1553c42ff65dSJohannes Berg trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); 1554c42ff65dSJohannes Berg 15555eae443eSSara Sharon if (WARN_ON(entry->entry >= trans->num_rx_queues)) 15565eae443eSSara Sharon return IRQ_NONE; 15575eae443eSSara Sharon 15582e5d4a8fSHaim Dreyfuss lock_map_acquire(&trans->sync_cmd_lockdep_map); 15592e5d4a8fSHaim Dreyfuss 15602e5d4a8fSHaim Dreyfuss local_bh_disable(); 15612e5d4a8fSHaim Dreyfuss iwl_pcie_rx_handle(trans, entry->entry); 15622e5d4a8fSHaim Dreyfuss local_bh_enable(); 15632e5d4a8fSHaim Dreyfuss 15642e5d4a8fSHaim Dreyfuss iwl_pcie_clear_irq(trans, entry); 15652e5d4a8fSHaim Dreyfuss 15662e5d4a8fSHaim Dreyfuss lock_map_release(&trans->sync_cmd_lockdep_map); 15672e5d4a8fSHaim Dreyfuss 15682e5d4a8fSHaim Dreyfuss return IRQ_HANDLED; 15692e5d4a8fSHaim Dreyfuss } 15702e5d4a8fSHaim Dreyfuss 1571e705c121SKalle Valo /* 1572e705c121SKalle Valo * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1573e705c121SKalle Valo */ 1574e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1575e705c121SKalle Valo { 1576e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1577e705c121SKalle Valo int i; 1578e705c121SKalle Valo 1579e705c121SKalle Valo /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1580e705c121SKalle Valo if (trans->cfg->internal_wimax_coex && 1581e705c121SKalle Valo !trans->cfg->apmg_not_supported && 1582e705c121SKalle Valo (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1583e705c121SKalle Valo APMS_CLK_VAL_MRB_FUNC_MODE) || 1584e705c121SKalle Valo (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1585e705c121SKalle Valo APMG_PS_CTRL_VAL_RESET_REQ))) { 1586e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1587e705c121SKalle Valo iwl_op_mode_wimax_active(trans->op_mode); 1588e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1589e705c121SKalle Valo return; 1590e705c121SKalle Valo } 1591e705c121SKalle Valo 1592286ca8ebSLuca Coelho for (i = 0; i < 
trans->trans_cfg->base_params->num_of_queues; i++) {
159313a3a390SSara Sharon 		if (!trans_pcie->txq[i])
159413a3a390SSara Sharon 			continue;
1595b2a3b1c1SSara Sharon 		del_timer(&trans_pcie->txq[i]->stuck_timer);
159613a3a390SSara Sharon 	}
1597e705c121SKalle Valo 
15987d75f32eSEmmanuel Grumbach 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
15997d75f32eSEmmanuel Grumbach 	 * before we wake up the command caller, to ensure a proper cleanup. */
16007d75f32eSEmmanuel Grumbach 	iwl_trans_fw_error(trans);
16017d75f32eSEmmanuel Grumbach 
1602e705c121SKalle Valo 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1603e705c121SKalle Valo 	wake_up(&trans_pcie->wait_command_queue);
1604e705c121SKalle Valo }
1605e705c121SKalle Valo 
1606e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1607e705c121SKalle Valo {
1608e705c121SKalle Valo 	u32 inta;
1609e705c121SKalle Valo 
1610e705c121SKalle Valo 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1611e705c121SKalle Valo 
1612e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1613e705c121SKalle Valo 
1614e705c121SKalle Valo 	/* Discover which interrupts are active/pending */
1615e705c121SKalle Valo 	inta = iwl_read32(trans, CSR_INT);
1616e705c121SKalle Valo 
1617e705c121SKalle Valo 	/* the thread will service interrupts and re-enable them */
1618e705c121SKalle Valo 	return inta;
1619e705c121SKalle Valo }
1620e705c121SKalle Valo 
1621e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1622e705c121SKalle Valo #define ICT_SHIFT	12
1623e705c121SKalle Valo #define ICT_SIZE	(1 << ICT_SHIFT)
1624e705c121SKalle Valo #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
1625e705c121SKalle Valo 
1626e705c121SKalle Valo /* Interrupt handler using the ICT table. With this mechanism the driver
1627e705c121SKalle Valo  * stops reading the INTA register to discover the device's interrupts,
1628e705c121SKalle Valo  * since reading that register is expensive. Instead, the device writes
1629e705c121SKalle Valo  * interrupt causes into the ICT DRAM table, increments its index and
1630e705c121SKalle Valo  * fires an interrupt. The driver then ORs all ICT table entries from the
1631e705c121SKalle Valo  * current index up to the first entry with value 0; the result is the
1632e705c121SKalle Valo  * interrupt we need to service. Finally, the driver sets the visited
1633e705c121SKalle Valo  * entries back to 0 and updates the index.
1634e705c121SKalle Valo  */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1635e705c121SKalle Valo {
1636e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1637e705c121SKalle Valo 	u32 inta;
1638e705c121SKalle Valo 	u32 val = 0;
1639e705c121SKalle Valo 	u32 read;
1640e705c121SKalle Valo 
1641e705c121SKalle Valo 	trace_iwlwifi_dev_irq(trans->dev);
1642e705c121SKalle Valo 
1643e705c121SKalle Valo 	/* Ignore interrupt if there's nothing in NIC to service.
1644e705c121SKalle Valo 	 * This may be due to IRQ shared with another device,
1645e705c121SKalle Valo 	 * or due to sporadic interrupts thrown from our NIC. */
1646e705c121SKalle Valo 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1647e705c121SKalle Valo 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1648e705c121SKalle Valo 	if (!read)
1649e705c121SKalle Valo 		return 0;
1650e705c121SKalle Valo 
1651e705c121SKalle Valo 	/*
1652e705c121SKalle Valo 	 * Collect all entries up to the first 0, starting from ict_index;
1653e705c121SKalle Valo 	 * note we already read at ict_index.
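 * For example, with (illustrative) entries 0x2, 0x8, 0 starting at
 * ict_index, val ends up as 0xA, both non-zero slots are zeroed, and
 * ict_index is left pointing at the zero entry.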
1654e705c121SKalle Valo 	 */
1655e705c121SKalle Valo 	do {
1656e705c121SKalle Valo 		val |= read;
1657e705c121SKalle Valo 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1658e705c121SKalle Valo 			      trans_pcie->ict_index, read);
1659e705c121SKalle Valo 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1660e705c121SKalle Valo 		trans_pcie->ict_index =
1661e705c121SKalle Valo 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1662e705c121SKalle Valo 
1663e705c121SKalle Valo 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1664e705c121SKalle Valo 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1665e705c121SKalle Valo 					   read);
1666e705c121SKalle Valo 	} while (read);
1667e705c121SKalle Valo 
1668e705c121SKalle Valo 	/* We should not get this value, just ignore it. */
1669e705c121SKalle Valo 	if (val == 0xffffffff)
1670e705c121SKalle Valo 		val = 0;
1671e705c121SKalle Valo 
1672e705c121SKalle Valo 	/*
1673e705c121SKalle Valo 	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
1674e705c121SKalle Valo 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1675e705c121SKalle Valo 	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
1676e705c121SKalle Valo 	 * so we use them to decide on the real state of the Rx bit.
1677e705c121SKalle Valo 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1678e705c121SKalle Valo 	 */
1679e705c121SKalle Valo 	if (val & 0xC0000)
1680e705c121SKalle Valo 		val |= 0x8000;
1681e705c121SKalle Valo 
1682e705c121SKalle Valo 	inta = (0xff & val) | ((0xff00 & val) << 16);
1683e705c121SKalle Valo 	return inta;
1684e705c121SKalle Valo }
1685e705c121SKalle Valo 
1686fa4de7f7SJohannes Berg void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
16873a6e168bSJohannes Berg {
16883a6e168bSJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
16893a6e168bSJohannes Berg 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1690326477e4SJohannes Berg 	bool hw_rfkill, prev, report;
16913a6e168bSJohannes Berg 
16923a6e168bSJohannes Berg 	mutex_lock(&trans_pcie->mutex);
1693326477e4SJohannes Berg 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
16943a6e168bSJohannes Berg 	hw_rfkill = iwl_is_rfkill_set(trans);
1695326477e4SJohannes Berg 	if (hw_rfkill) {
1696326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1697326477e4SJohannes Berg 		set_bit(STATUS_RFKILL_HW, &trans->status);
1698326477e4SJohannes Berg 	}
1699326477e4SJohannes Berg 	if (trans_pcie->opmode_down)
1700326477e4SJohannes Berg 		report = hw_rfkill;
1701326477e4SJohannes Berg 	else
1702326477e4SJohannes Berg 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
17033a6e168bSJohannes Berg 
17043a6e168bSJohannes Berg 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
17053a6e168bSJohannes Berg 		 hw_rfkill ?
"disable radio" : "enable radio"); 17063a6e168bSJohannes Berg 17073a6e168bSJohannes Berg isr_stats->rfkill++; 17083a6e168bSJohannes Berg 1709326477e4SJohannes Berg if (prev != report) 1710326477e4SJohannes Berg iwl_trans_pcie_rf_kill(trans, report); 17113a6e168bSJohannes Berg mutex_unlock(&trans_pcie->mutex); 17123a6e168bSJohannes Berg 17133a6e168bSJohannes Berg if (hw_rfkill) { 17143a6e168bSJohannes Berg if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 17153a6e168bSJohannes Berg &trans->status)) 17163a6e168bSJohannes Berg IWL_DEBUG_RF_KILL(trans, 17173a6e168bSJohannes Berg "Rfkill while SYNC HCMD in flight\n"); 17183a6e168bSJohannes Berg wake_up(&trans_pcie->wait_command_queue); 17193a6e168bSJohannes Berg } else { 1720326477e4SJohannes Berg clear_bit(STATUS_RFKILL_HW, &trans->status); 1721326477e4SJohannes Berg if (trans_pcie->opmode_down) 1722326477e4SJohannes Berg clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 17233a6e168bSJohannes Berg } 17243a6e168bSJohannes Berg } 17253a6e168bSJohannes Berg 1726e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 1727e705c121SKalle Valo { 1728e705c121SKalle Valo struct iwl_trans *trans = dev_id; 1729e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1730e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1731e705c121SKalle Valo u32 inta = 0; 1732e705c121SKalle Valo u32 handled = 0; 1733e705c121SKalle Valo 1734e705c121SKalle Valo lock_map_acquire(&trans->sync_cmd_lockdep_map); 1735e705c121SKalle Valo 1736e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 1737e705c121SKalle Valo 1738e705c121SKalle Valo /* dram interrupt table not set yet, 1739e705c121SKalle Valo * use legacy interrupt. 1740e705c121SKalle Valo */ 1741e705c121SKalle Valo if (likely(trans_pcie->use_ict)) 1742e705c121SKalle Valo inta = iwl_pcie_int_cause_ict(trans); 1743e705c121SKalle Valo else 1744e705c121SKalle Valo inta = iwl_pcie_int_cause_non_ict(trans); 1745e705c121SKalle Valo 1746e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) { 1747e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1748e705c121SKalle Valo "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", 1749e705c121SKalle Valo inta, trans_pcie->inta_mask, 1750e705c121SKalle Valo iwl_read32(trans, CSR_INT_MASK), 1751e705c121SKalle Valo iwl_read32(trans, CSR_FH_INT_STATUS)); 1752e705c121SKalle Valo if (inta & (~trans_pcie->inta_mask)) 1753e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1754e705c121SKalle Valo "We got a masked interrupt (0x%08x)\n", 1755e705c121SKalle Valo inta & (~trans_pcie->inta_mask)); 1756e705c121SKalle Valo } 1757e705c121SKalle Valo 1758e705c121SKalle Valo inta &= trans_pcie->inta_mask; 1759e705c121SKalle Valo 1760e705c121SKalle Valo /* 1761e705c121SKalle Valo * Ignore interrupt if there's nothing in NIC to service. 1762e705c121SKalle Valo * This may be due to IRQ shared with another device, 1763e705c121SKalle Valo * or due to sporadic interrupts thrown from our NIC. 
1764e705c121SKalle Valo */ 1765e705c121SKalle Valo if (unlikely(!inta)) { 1766e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1767e705c121SKalle Valo /* 1768e705c121SKalle Valo * Re-enable interrupts here since we don't 1769e705c121SKalle Valo * have anything to service 1770e705c121SKalle Valo */ 1771e705c121SKalle Valo if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1772f16c3ebfSEmmanuel Grumbach _iwl_enable_interrupts(trans); 1773e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1774e705c121SKalle Valo lock_map_release(&trans->sync_cmd_lockdep_map); 1775e705c121SKalle Valo return IRQ_NONE; 1776e705c121SKalle Valo } 1777e705c121SKalle Valo 1778e705c121SKalle Valo if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1779e705c121SKalle Valo /* 1780e705c121SKalle Valo * Hardware disappeared. It might have 1781e705c121SKalle Valo * already raised an interrupt. 1782e705c121SKalle Valo */ 1783e705c121SKalle Valo IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 1784e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1785e705c121SKalle Valo goto out; 1786e705c121SKalle Valo } 1787e705c121SKalle Valo 1788e705c121SKalle Valo /* Ack/clear/reset pending uCode interrupts. 1789e705c121SKalle Valo * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1790e705c121SKalle Valo */ 1791e705c121SKalle Valo /* There is a hardware bug in the interrupt mask function that some 1792e705c121SKalle Valo * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if 1793e705c121SKalle Valo * they are disabled in the CSR_INT_MASK register. Furthermore the 1794e705c121SKalle Valo * ICT interrupt handling mechanism has another bug that might cause 1795e705c121SKalle Valo * these unmasked interrupts fail to be detected. We workaround the 1796e705c121SKalle Valo * hardware bugs here by ACKing all the possible interrupts so that 1797e705c121SKalle Valo * interrupt coalescing can still be achieved. 1798e705c121SKalle Valo */ 1799e705c121SKalle Valo iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1800e705c121SKalle Valo 1801e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) 1802e705c121SKalle Valo IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1803e705c121SKalle Valo inta, iwl_read32(trans, CSR_INT_MASK)); 1804e705c121SKalle Valo 1805e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1806e705c121SKalle Valo 1807e705c121SKalle Valo /* Now service all interrupt bits discovered above. */ 1808e705c121SKalle Valo if (inta & CSR_INT_BIT_HW_ERR) { 1809e705c121SKalle Valo IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); 1810e705c121SKalle Valo 1811e705c121SKalle Valo /* Tell the device to stop sending interrupts */ 1812e705c121SKalle Valo iwl_disable_interrupts(trans); 1813e705c121SKalle Valo 1814e705c121SKalle Valo isr_stats->hw++; 1815e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1816e705c121SKalle Valo 1817e705c121SKalle Valo handled |= CSR_INT_BIT_HW_ERR; 1818e705c121SKalle Valo 1819e705c121SKalle Valo goto out; 1820e705c121SKalle Valo } 1821e705c121SKalle Valo 1822e705c121SKalle Valo /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1823e705c121SKalle Valo if (inta & CSR_INT_BIT_SCD) { 1824e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1825e705c121SKalle Valo "Scheduler finished to transmit the frame/frames.\n"); 1826e705c121SKalle Valo isr_stats->sch++; 1827e705c121SKalle Valo } 1828e705c121SKalle Valo 1829e705c121SKalle Valo /* Alive notification via Rx interrupt will do the real work */ 1830e705c121SKalle Valo if (inta & CSR_INT_BIT_ALIVE) { 1831e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1832e705c121SKalle Valo isr_stats->alive++; 1833286ca8ebSLuca Coelho if (trans->trans_cfg->gen2) { 1834eda50cdeSSara Sharon /* 1835eda50cdeSSara Sharon * We can restock, since firmware configured 1836eda50cdeSSara Sharon * the RFH 1837eda50cdeSSara Sharon */ 1838eda50cdeSSara Sharon iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 1839eda50cdeSSara Sharon } 1840ed3e4c6dSEmmanuel Grumbach 1841ed3e4c6dSEmmanuel Grumbach handled |= CSR_INT_BIT_ALIVE; 1842e705c121SKalle Valo } 1843e705c121SKalle Valo 1844e705c121SKalle Valo /* Safely ignore these bits for debug checks below */ 1845e705c121SKalle Valo inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1846e705c121SKalle Valo 1847e705c121SKalle Valo /* HW RF KILL switch toggled */ 1848e705c121SKalle Valo if (inta & CSR_INT_BIT_RF_KILL) { 18493a6e168bSJohannes Berg iwl_pcie_handle_rfkill_irq(trans); 1850e705c121SKalle Valo handled |= CSR_INT_BIT_RF_KILL; 1851e705c121SKalle Valo } 1852e705c121SKalle Valo 1853e705c121SKalle Valo /* Chip got too hot and stopped itself */ 1854e705c121SKalle Valo if (inta & CSR_INT_BIT_CT_KILL) { 1855e705c121SKalle Valo IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1856e705c121SKalle Valo isr_stats->ctkill++; 1857e705c121SKalle Valo handled |= CSR_INT_BIT_CT_KILL; 1858e705c121SKalle Valo } 1859e705c121SKalle Valo 1860e705c121SKalle Valo /* Error detected by uCode */ 1861e705c121SKalle Valo if (inta & CSR_INT_BIT_SW_ERR) { 1862e705c121SKalle Valo IWL_ERR(trans, "Microcode SW error detected. 
" 1863e705c121SKalle Valo " Restarting 0x%X.\n", inta); 1864e705c121SKalle Valo isr_stats->sw++; 1865e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1866e705c121SKalle Valo handled |= CSR_INT_BIT_SW_ERR; 1867e705c121SKalle Valo } 1868e705c121SKalle Valo 1869e705c121SKalle Valo /* uCode wakes up after power-down sleep */ 1870e705c121SKalle Valo if (inta & CSR_INT_BIT_WAKEUP) { 1871e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1872e705c121SKalle Valo iwl_pcie_rxq_check_wrptr(trans); 1873e705c121SKalle Valo iwl_pcie_txq_check_wrptrs(trans); 1874e705c121SKalle Valo 1875e705c121SKalle Valo isr_stats->wakeup++; 1876e705c121SKalle Valo 1877e705c121SKalle Valo handled |= CSR_INT_BIT_WAKEUP; 1878e705c121SKalle Valo } 1879e705c121SKalle Valo 1880e705c121SKalle Valo /* All uCode command responses, including Tx command responses, 1881e705c121SKalle Valo * Rx "responses" (frame-received notification), and other 1882e705c121SKalle Valo * notifications from uCode come through here*/ 1883e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1884e705c121SKalle Valo CSR_INT_BIT_RX_PERIODIC)) { 1885e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1886e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1887e705c121SKalle Valo handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1888e705c121SKalle Valo iwl_write32(trans, CSR_FH_INT_STATUS, 1889e705c121SKalle Valo CSR_FH_INT_RX_MASK); 1890e705c121SKalle Valo } 1891e705c121SKalle Valo if (inta & CSR_INT_BIT_RX_PERIODIC) { 1892e705c121SKalle Valo handled |= CSR_INT_BIT_RX_PERIODIC; 1893e705c121SKalle Valo iwl_write32(trans, 1894e705c121SKalle Valo CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1895e705c121SKalle Valo } 1896e705c121SKalle Valo /* Sending RX interrupt require many steps to be done in the 1897e705c121SKalle Valo * the device: 1898e705c121SKalle Valo * 1- write interrupt to current index in ICT table. 1899e705c121SKalle Valo * 2- dma RX frame. 1900e705c121SKalle Valo * 3- update RX shared data to indicate last write index. 1901e705c121SKalle Valo * 4- send interrupt. 1902e705c121SKalle Valo * This could lead to RX race, driver could receive RX interrupt 1903e705c121SKalle Valo * but the shared data changes does not reflect this; 1904e705c121SKalle Valo * periodic interrupt will detect any dangling Rx activity. 1905e705c121SKalle Valo */ 1906e705c121SKalle Valo 1907e705c121SKalle Valo /* Disable periodic interrupt; we use it as just a one-shot. */ 1908e705c121SKalle Valo iwl_write8(trans, CSR_INT_PERIODIC_REG, 1909e705c121SKalle Valo CSR_INT_PERIODIC_DIS); 1910e705c121SKalle Valo 1911e705c121SKalle Valo /* 1912e705c121SKalle Valo * Enable periodic interrupt in 8 msec only if we received 1913e705c121SKalle Valo * real RX interrupt (instead of just periodic int), to catch 1914e705c121SKalle Valo * any dangling Rx interrupt. If it was just the periodic 1915e705c121SKalle Valo * interrupt, there was no dangling Rx activity, and no need 1916e705c121SKalle Valo * to extend the periodic interrupt; one-shot is enough. 
1917e705c121SKalle Valo */ 1918e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1919e705c121SKalle Valo iwl_write8(trans, CSR_INT_PERIODIC_REG, 1920e705c121SKalle Valo CSR_INT_PERIODIC_ENA); 1921e705c121SKalle Valo 1922e705c121SKalle Valo isr_stats->rx++; 1923e705c121SKalle Valo 1924e705c121SKalle Valo local_bh_disable(); 19252e5d4a8fSHaim Dreyfuss iwl_pcie_rx_handle(trans, 0); 1926e705c121SKalle Valo local_bh_enable(); 1927e705c121SKalle Valo } 1928e705c121SKalle Valo 1929e705c121SKalle Valo /* This "Tx" DMA channel is used only for loading uCode */ 1930e705c121SKalle Valo if (inta & CSR_INT_BIT_FH_TX) { 1931e705c121SKalle Valo iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 1932e705c121SKalle Valo IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 1933e705c121SKalle Valo isr_stats->tx++; 1934e705c121SKalle Valo handled |= CSR_INT_BIT_FH_TX; 1935e705c121SKalle Valo /* Wake up uCode load routine, now that load is complete */ 1936e705c121SKalle Valo trans_pcie->ucode_write_complete = true; 1937e705c121SKalle Valo wake_up(&trans_pcie->ucode_write_waitq); 1938e705c121SKalle Valo } 1939e705c121SKalle Valo 1940e705c121SKalle Valo if (inta & ~handled) { 1941e705c121SKalle Valo IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1942e705c121SKalle Valo isr_stats->unhandled++; 1943e705c121SKalle Valo } 1944e705c121SKalle Valo 1945e705c121SKalle Valo if (inta & ~(trans_pcie->inta_mask)) { 1946e705c121SKalle Valo IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 1947e705c121SKalle Valo inta & ~trans_pcie->inta_mask); 1948e705c121SKalle Valo } 1949e705c121SKalle Valo 1950f16c3ebfSEmmanuel Grumbach spin_lock(&trans_pcie->irq_lock); 1951a6bd005fSEmmanuel Grumbach /* only Re-enable all interrupt if disabled by irq */ 1952f16c3ebfSEmmanuel Grumbach if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1953f16c3ebfSEmmanuel Grumbach _iwl_enable_interrupts(trans); 1954f16c3ebfSEmmanuel Grumbach /* we are loading the firmware, enable FH_TX interrupt only */ 1955f16c3ebfSEmmanuel Grumbach else if (handled & CSR_INT_BIT_FH_TX) 1956f16c3ebfSEmmanuel Grumbach iwl_enable_fw_load_int(trans); 1957e705c121SKalle Valo /* Re-enable RF_KILL if it occurred */ 1958e705c121SKalle Valo else if (handled & CSR_INT_BIT_RF_KILL) 1959e705c121SKalle Valo iwl_enable_rfkill_int(trans); 1960ed3e4c6dSEmmanuel Grumbach /* Re-enable the ALIVE / Rx interrupt if it occurred */ 1961ed3e4c6dSEmmanuel Grumbach else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) 1962ed3e4c6dSEmmanuel Grumbach iwl_enable_fw_load_int_ctx_info(trans); 1963f16c3ebfSEmmanuel Grumbach spin_unlock(&trans_pcie->irq_lock); 1964e705c121SKalle Valo 1965e705c121SKalle Valo out: 1966e705c121SKalle Valo lock_map_release(&trans->sync_cmd_lockdep_map); 1967e705c121SKalle Valo return IRQ_HANDLED; 1968e705c121SKalle Valo } 1969e705c121SKalle Valo 1970e705c121SKalle Valo /****************************************************************************** 1971e705c121SKalle Valo * 1972e705c121SKalle Valo * ICT functions 1973e705c121SKalle Valo * 1974e705c121SKalle Valo ******************************************************************************/ 1975e705c121SKalle Valo 1976e705c121SKalle Valo /* Free dram table */ 1977e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans) 1978e705c121SKalle Valo { 1979e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1980e705c121SKalle Valo 1981e705c121SKalle Valo if (trans_pcie->ict_tbl) { 1982e705c121SKalle Valo 
dma_free_coherent(trans->dev, ICT_SIZE, 1983e705c121SKalle Valo trans_pcie->ict_tbl, 1984e705c121SKalle Valo trans_pcie->ict_tbl_dma); 1985e705c121SKalle Valo trans_pcie->ict_tbl = NULL; 1986e705c121SKalle Valo trans_pcie->ict_tbl_dma = 0; 1987e705c121SKalle Valo } 1988e705c121SKalle Valo } 1989e705c121SKalle Valo 1990e705c121SKalle Valo /* 1991e705c121SKalle Valo * allocate dram shared table, it is an aligned memory 1992e705c121SKalle Valo * block of ICT_SIZE. 1993e705c121SKalle Valo * also reset all data related to ICT table interrupt. 1994e705c121SKalle Valo */ 1995e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans) 1996e705c121SKalle Valo { 1997e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1998e705c121SKalle Valo 1999e705c121SKalle Valo trans_pcie->ict_tbl = 2000750afb08SLuis Chamberlain dma_alloc_coherent(trans->dev, ICT_SIZE, 2001750afb08SLuis Chamberlain &trans_pcie->ict_tbl_dma, GFP_KERNEL); 2002e705c121SKalle Valo if (!trans_pcie->ict_tbl) 2003e705c121SKalle Valo return -ENOMEM; 2004e705c121SKalle Valo 2005e705c121SKalle Valo /* just an API sanity check ... it is guaranteed to be aligned */ 2006e705c121SKalle Valo if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 2007e705c121SKalle Valo iwl_pcie_free_ict(trans); 2008e705c121SKalle Valo return -EINVAL; 2009e705c121SKalle Valo } 2010e705c121SKalle Valo 2011e705c121SKalle Valo return 0; 2012e705c121SKalle Valo } 2013e705c121SKalle Valo 2014e705c121SKalle Valo /* Device is going up inform it about using ICT interrupt table, 2015e705c121SKalle Valo * also we need to tell the driver to start using ICT interrupt. 2016e705c121SKalle Valo */ 2017e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans) 2018e705c121SKalle Valo { 2019e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2020e705c121SKalle Valo u32 val; 2021e705c121SKalle Valo 2022e705c121SKalle Valo if (!trans_pcie->ict_tbl) 2023e705c121SKalle Valo return; 2024e705c121SKalle Valo 2025e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 2026f16c3ebfSEmmanuel Grumbach _iwl_disable_interrupts(trans); 2027e705c121SKalle Valo 2028e705c121SKalle Valo memset(trans_pcie->ict_tbl, 0, ICT_SIZE); 2029e705c121SKalle Valo 2030e705c121SKalle Valo val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; 2031e705c121SKalle Valo 2032e705c121SKalle Valo val |= CSR_DRAM_INT_TBL_ENABLE | 2033e705c121SKalle Valo CSR_DRAM_INIT_TBL_WRAP_CHECK | 2034e705c121SKalle Valo CSR_DRAM_INIT_TBL_WRITE_POINTER; 2035e705c121SKalle Valo 2036e705c121SKalle Valo IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); 2037e705c121SKalle Valo 2038e705c121SKalle Valo iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); 2039e705c121SKalle Valo trans_pcie->use_ict = true; 2040e705c121SKalle Valo trans_pcie->ict_index = 0; 2041e705c121SKalle Valo iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); 2042f16c3ebfSEmmanuel Grumbach _iwl_enable_interrupts(trans); 2043e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 2044e705c121SKalle Valo } 2045e705c121SKalle Valo 2046e705c121SKalle Valo /* Device is going down disable ict interrupt usage */ 2047e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans) 2048e705c121SKalle Valo { 2049e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2050e705c121SKalle Valo 2051e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 2052e705c121SKalle Valo trans_pcie->use_ict = false; 2053e705c121SKalle Valo 
spin_unlock(&trans_pcie->irq_lock);
2054e705c121SKalle Valo }
2055e705c121SKalle Valo 
2056e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data)
2057e705c121SKalle Valo {
2058e705c121SKalle Valo 	struct iwl_trans *trans = data;
2059e705c121SKalle Valo 
2060e705c121SKalle Valo 	if (!trans)
2061e705c121SKalle Valo 		return IRQ_NONE;
2062e705c121SKalle Valo 
2063e705c121SKalle Valo 	/* Disable (but don't clear!) interrupts here to avoid
2064e705c121SKalle Valo 	 * back-to-back ISRs and sporadic interrupts from our NIC.
2065e705c121SKalle Valo 	 * If we have something to service, the tasklet will re-enable ints.
2066e705c121SKalle Valo 	 * If we *don't* have something, we'll re-enable before leaving here.
2067e705c121SKalle Valo 	 */
2068e705c121SKalle Valo 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2069e705c121SKalle Valo 
2070e705c121SKalle Valo 	return IRQ_WAKE_THREAD;
2071e705c121SKalle Valo }
20722e5d4a8fSHaim Dreyfuss 
20732e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
20742e5d4a8fSHaim Dreyfuss {
20752e5d4a8fSHaim Dreyfuss 	return IRQ_WAKE_THREAD;
20762e5d4a8fSHaim Dreyfuss }
20772e5d4a8fSHaim Dreyfuss 
20782e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
20792e5d4a8fSHaim Dreyfuss {
20802e5d4a8fSHaim Dreyfuss 	struct msix_entry *entry = dev_id;
20812e5d4a8fSHaim Dreyfuss 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
20822e5d4a8fSHaim Dreyfuss 	struct iwl_trans *trans = trans_pcie->trans;
208346167a8fSColin Ian King 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
20842e5d4a8fSHaim Dreyfuss 	u32 inta_fh, inta_hw;
20852e5d4a8fSHaim Dreyfuss 
20862e5d4a8fSHaim Dreyfuss 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
20872e5d4a8fSHaim Dreyfuss 
20882e5d4a8fSHaim Dreyfuss 	spin_lock(&trans_pcie->irq_lock);
20897ef3dd26SHaim Dreyfuss 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
20907ef3dd26SHaim Dreyfuss 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
20912e5d4a8fSHaim Dreyfuss 	/*
20922e5d4a8fSHaim Dreyfuss 	 * Clear the causes registers to avoid handling the same cause again.
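 * (Writing the values we just read back to the cause registers is
 * what acks and clears those bits.)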
20932e5d4a8fSHaim Dreyfuss */ 20947ef3dd26SHaim Dreyfuss iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); 20957ef3dd26SHaim Dreyfuss iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); 20962e5d4a8fSHaim Dreyfuss spin_unlock(&trans_pcie->irq_lock); 20972e5d4a8fSHaim Dreyfuss 2098c42ff65dSJohannes Berg trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); 2099c42ff65dSJohannes Berg 21002e5d4a8fSHaim Dreyfuss if (unlikely(!(inta_fh | inta_hw))) { 21012e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 21022e5d4a8fSHaim Dreyfuss lock_map_release(&trans->sync_cmd_lockdep_map); 21032e5d4a8fSHaim Dreyfuss return IRQ_NONE; 21042e5d4a8fSHaim Dreyfuss } 21052e5d4a8fSHaim Dreyfuss 21063b57a10cSEmmanuel Grumbach if (iwl_have_debug_level(IWL_DL_ISR)) { 21073b57a10cSEmmanuel Grumbach IWL_DEBUG_ISR(trans, 21083b57a10cSEmmanuel Grumbach "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", 21093b57a10cSEmmanuel Grumbach inta_fh, trans_pcie->fh_mask, 21102e5d4a8fSHaim Dreyfuss iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); 21113b57a10cSEmmanuel Grumbach if (inta_fh & ~trans_pcie->fh_mask) 21123b57a10cSEmmanuel Grumbach IWL_DEBUG_ISR(trans, 21133b57a10cSEmmanuel Grumbach "We got a masked interrupt (0x%08x)\n", 21143b57a10cSEmmanuel Grumbach inta_fh & ~trans_pcie->fh_mask); 21153b57a10cSEmmanuel Grumbach } 21163b57a10cSEmmanuel Grumbach 21173b57a10cSEmmanuel Grumbach inta_fh &= trans_pcie->fh_mask; 21182e5d4a8fSHaim Dreyfuss 2119496d83caSHaim Dreyfuss if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && 2120496d83caSHaim Dreyfuss inta_fh & MSIX_FH_INT_CAUSES_Q0) { 2121496d83caSHaim Dreyfuss local_bh_disable(); 2122496d83caSHaim Dreyfuss iwl_pcie_rx_handle(trans, 0); 2123496d83caSHaim Dreyfuss local_bh_enable(); 2124496d83caSHaim Dreyfuss } 2125496d83caSHaim Dreyfuss 2126496d83caSHaim Dreyfuss if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && 2127496d83caSHaim Dreyfuss inta_fh & MSIX_FH_INT_CAUSES_Q1) { 2128496d83caSHaim Dreyfuss local_bh_disable(); 2129496d83caSHaim Dreyfuss iwl_pcie_rx_handle(trans, 1); 2130496d83caSHaim Dreyfuss local_bh_enable(); 2131496d83caSHaim Dreyfuss } 2132496d83caSHaim Dreyfuss 21332e5d4a8fSHaim Dreyfuss /* This "Tx" DMA channel is used only for loading uCode */ 21342e5d4a8fSHaim Dreyfuss if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { 21352e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 21362e5d4a8fSHaim Dreyfuss isr_stats->tx++; 21372e5d4a8fSHaim Dreyfuss /* 21382e5d4a8fSHaim Dreyfuss * Wake up uCode load routine, 21392e5d4a8fSHaim Dreyfuss * now that load is complete 21402e5d4a8fSHaim Dreyfuss */ 21412e5d4a8fSHaim Dreyfuss trans_pcie->ucode_write_complete = true; 21422e5d4a8fSHaim Dreyfuss wake_up(&trans_pcie->ucode_write_waitq); 21432e5d4a8fSHaim Dreyfuss } 21442e5d4a8fSHaim Dreyfuss 21452e5d4a8fSHaim Dreyfuss /* Error detected by uCode */ 21462e5d4a8fSHaim Dreyfuss if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || 21473681021fSJohannes Berg (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { 21482e5d4a8fSHaim Dreyfuss IWL_ERR(trans, 21492e5d4a8fSHaim Dreyfuss "Microcode SW error detected. 
Restarting 0x%X.\n", 21502e5d4a8fSHaim Dreyfuss inta_fh); 21512e5d4a8fSHaim Dreyfuss isr_stats->sw++; 21522e5d4a8fSHaim Dreyfuss iwl_pcie_irq_handle_error(trans); 21532e5d4a8fSHaim Dreyfuss } 21542e5d4a8fSHaim Dreyfuss 21552e5d4a8fSHaim Dreyfuss /* After checking FH register check HW register */ 21563b57a10cSEmmanuel Grumbach if (iwl_have_debug_level(IWL_DL_ISR)) { 21572e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, 21583b57a10cSEmmanuel Grumbach "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", 21593b57a10cSEmmanuel Grumbach inta_hw, trans_pcie->hw_mask, 21602e5d4a8fSHaim Dreyfuss iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); 21613b57a10cSEmmanuel Grumbach if (inta_hw & ~trans_pcie->hw_mask) 21623b57a10cSEmmanuel Grumbach IWL_DEBUG_ISR(trans, 21633b57a10cSEmmanuel Grumbach "We got a masked interrupt 0x%08x\n", 21643b57a10cSEmmanuel Grumbach inta_hw & ~trans_pcie->hw_mask); 21653b57a10cSEmmanuel Grumbach } 21663b57a10cSEmmanuel Grumbach 21673b57a10cSEmmanuel Grumbach inta_hw &= trans_pcie->hw_mask; 21682e5d4a8fSHaim Dreyfuss 21692e5d4a8fSHaim Dreyfuss /* Alive notification via Rx interrupt will do the real work */ 21702e5d4a8fSHaim Dreyfuss if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { 21712e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 21722e5d4a8fSHaim Dreyfuss isr_stats->alive++; 2173286ca8ebSLuca Coelho if (trans->trans_cfg->gen2) { 2174eda50cdeSSara Sharon /* We can restock, since firmware configured the RFH */ 2175eda50cdeSSara Sharon iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 2176eda50cdeSSara Sharon } 21772e5d4a8fSHaim Dreyfuss } 21782e5d4a8fSHaim Dreyfuss 21793681021fSJohannes Berg if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { 2180e5f3f215SHaim Dreyfuss u32 sleep_notif = 2181e5f3f215SHaim Dreyfuss le32_to_cpu(trans_pcie->prph_info->sleep_notif); 2182e5f3f215SHaim Dreyfuss if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || 2183e5f3f215SHaim Dreyfuss sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) { 2184e5f3f215SHaim Dreyfuss IWL_DEBUG_ISR(trans, 2185e5f3f215SHaim Dreyfuss "Sx interrupt: sleep notification = 0x%x\n", 2186e5f3f215SHaim Dreyfuss sleep_notif); 2187e5f3f215SHaim Dreyfuss trans_pcie->sx_complete = true; 2188e5f3f215SHaim Dreyfuss wake_up(&trans_pcie->sx_waitq); 2189e5f3f215SHaim Dreyfuss } else { 21902e5d4a8fSHaim Dreyfuss /* uCode wakes up after power-down sleep */ 21912e5d4a8fSHaim Dreyfuss IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 21922e5d4a8fSHaim Dreyfuss iwl_pcie_rxq_check_wrptr(trans); 21932e5d4a8fSHaim Dreyfuss iwl_pcie_txq_check_wrptrs(trans); 21942e5d4a8fSHaim Dreyfuss 21952e5d4a8fSHaim Dreyfuss isr_stats->wakeup++; 21962e5d4a8fSHaim Dreyfuss } 2197e5f3f215SHaim Dreyfuss } 21982e5d4a8fSHaim Dreyfuss 2199ff911dcaSShaul Triebitz if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) { 2200ff911dcaSShaul Triebitz /* Reflect IML transfer status */ 2201ff911dcaSShaul Triebitz int res = iwl_read32(trans, CSR_IML_RESP_ADDR); 2202ff911dcaSShaul Triebitz 2203ff911dcaSShaul Triebitz IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); 2204ff911dcaSShaul Triebitz if (res == IWL_IMAGE_RESP_FAIL) { 2205ff911dcaSShaul Triebitz isr_stats->sw++; 2206ff911dcaSShaul Triebitz iwl_pcie_irq_handle_error(trans); 2207ff911dcaSShaul Triebitz } 2208ff911dcaSShaul Triebitz } 2209ff911dcaSShaul Triebitz 22102e5d4a8fSHaim Dreyfuss /* Chip got too hot and stopped itself */ 22112e5d4a8fSHaim Dreyfuss if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { 22122e5d4a8fSHaim Dreyfuss IWL_ERR(trans, "Microcode CT kill error detected.\n"); 22132e5d4a8fSHaim Dreyfuss isr_stats->ctkill++; 
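		/* Note: unlike the INTA handler above, the MSI-X handler does
		 * not track a "handled" mask; all causes were already acked
		 * when written back near the top of this function. */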
22142e5d4a8fSHaim Dreyfuss 	}
22152e5d4a8fSHaim Dreyfuss 
22162e5d4a8fSHaim Dreyfuss 	/* HW RF KILL switch toggled */
22173a6e168bSJohannes Berg 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
22183a6e168bSJohannes Berg 		iwl_pcie_handle_rfkill_irq(trans);
22192e5d4a8fSHaim Dreyfuss 
22202e5d4a8fSHaim Dreyfuss 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
22212e5d4a8fSHaim Dreyfuss 		IWL_ERR(trans,
22222e5d4a8fSHaim Dreyfuss 			"Hardware error detected. Restarting.\n");
22232e5d4a8fSHaim Dreyfuss 
22242e5d4a8fSHaim Dreyfuss 		isr_stats->hw++;
222591c28b83SShahar S Matityahu 		trans->dbg.hw_error = true;
22262e5d4a8fSHaim Dreyfuss 		iwl_pcie_irq_handle_error(trans);
22272e5d4a8fSHaim Dreyfuss 	}
22282e5d4a8fSHaim Dreyfuss 
22292e5d4a8fSHaim Dreyfuss 	iwl_pcie_clear_irq(trans, entry);
22302e5d4a8fSHaim Dreyfuss 
22312e5d4a8fSHaim Dreyfuss 	lock_map_release(&trans->sync_cmd_lockdep_map);
22322e5d4a8fSHaim Dreyfuss 
22332e5d4a8fSHaim Dreyfuss 	return IRQ_HANDLED;
22342e5d4a8fSHaim Dreyfuss }