// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * The driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator's
 *   empty list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When six more RBDs are used - they are transferred to the allocator's
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of RBDs missing per allocation request (a request is
 *   posted with 2 empty RBDs; there is no guarantee when the other 6 RBDs
 *   are supplied). The queues supply the recycling of the remaining RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver's 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
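/*
 * A worked example of the index arithmetic described above, with
 * illustrative values only: take a 256-entry queue with READ = 5 and
 * WRITE = 250. The number of slots the driver may still restock is
 *
 *	(READ - WRITE - 1) & (256 - 1) = (5 - 250 - 1) & 255
 *				       = -246 & 255
 *				       = 10
 *
 * One slot is always left unused so that a completely full queue can be
 * told apart from an empty one.
 */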
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
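/*
 * A worked example of the conversion above, with illustrative values
 * only: the legacy RBD stores the DMA address shifted right by 8, which
 * assumes the low 8 bits are zero (receive buffers are at least
 * 256-byte aligned), so a 36-bit address fits in a 32-bit descriptor:
 *
 *	dma_addr = 0x123456700
 *	rbd_ptr  = dma_addr >> 8 = 0x01234567
 */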
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->trans_cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (!trans->trans_cfg->mq_rx_supported)
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
			    HBUS_TARG_WRPTR_RX_Q(rxq->id));
	else
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock_bh(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}
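/*
 * A sketch of the packed format used by the legacy multi-queue branch
 * above, assuming the buffer DMA address is page-aligned so its low
 * 12 bits are free to carry the virtual buffer ID (the exact field
 * width is hardware-defined; AX210 and later use the explicit
 * iwl_rx_transfer_desc with separate addr and rbid fields instead):
 *
 *	u64 packed = page_dma | vid;
 *	dma_addr_t addr = packed & ~0xfffULL;
 *	u16 rb_id = packed & 0xfff;
 */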
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* some low bits are expected to be unset (depending on hw) */
		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}
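/*
 * A worked example of the "multiples of 8" doorbell rule shared by both
 * restock paths above, with illustrative values only: if rxq->write
 * reaches 13, then (rxq->write & ~0x7) == 8. If the device was last
 * told 8 (rxq->write_actual == 8), no doorbell is rung; the RBDs at
 * indexes 8..12 are only published once write crosses 16 and
 * iwl_pcie_rxq_inc_wr_ptr() rounds the new value down to a multiple
 * of 8.
 */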
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   u32 *offset, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	if (trans_pcie->alloc_page) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		/* recheck */
		if (trans_pcie->alloc_page) {
			*offset = trans_pcie->alloc_page_used;
			page = trans_pcie->alloc_page;
			trans_pcie->alloc_page_used += rbsize;
			if (trans_pcie->alloc_page_used >= allocsize)
				trans_pcie->alloc_page = NULL;
			else
				get_page(page);
			spin_unlock_bh(&trans_pcie->alloc_page_lock);
			return page;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}

	if (2 * rbsize <= allocsize) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		if (!trans_pcie->alloc_page) {
			get_page(page);
			trans_pcie->alloc_page = page;
			trans_pcie->alloc_page_used = rbsize;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	*offset = 0;
	return page;
}
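/*
 * A worked example of the page-sharing logic above, with illustrative
 * values only: with 4 KiB pages (rx_page_order == 0) and 2 KiB RBs,
 * allocsize == 4096 and rbsize == 2048, so 2 * rbsize <= allocsize and
 * one page can back two RBs. The first caller gets offset 0 and the
 * page is cached in trans_pcie->alloc_page with an extra reference;
 * the second caller gets offset 2048 and the page is dropped from the
 * cache. Every RB holds its own page reference, so the page is freed
 * only after all RBs carved from it have been released.
 */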
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		unsigned int offset;

		spin_lock_bh(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			return;
		}
		spin_unlock_bh(&rxq->lock);

		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
		if (!page)
			return;

		spin_lock_bh(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_bh(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		rxb->offset = offset;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_bh(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock_bh(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_bh(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	if (!trans_pcie->rx_pool)
		return;

	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock_bh(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock_bh(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
						      gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page,
						     rxb->offset,
						     trans_pcie->rx_buf_bytes,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock_bh(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock_bh(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock_bh(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock_bh(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
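/*
 * A worked example of the accounting above, following the theory of
 * operation comment at the top of this file: a queue hands the
 * allocator RX_POST_REQ_ALLOC (2) used RBDs when posting a request,
 * but a completed request is only claimed once RX_CLAIM_REQ_ALLOC (8)
 * RBDs can be moved back to the queue's free list. The allocator's
 * initial pool of num_queues * (8 - 2) spare RBDs covers the six RBDs
 * per request that may still be in flight when a request is posted.
 */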
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return sizeof(struct iwl_rx_transfer_desc);

	return trans->trans_cfg->mq_rx_supported ?
		sizeof(__le64) : sizeof(__le32);
}

static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		return sizeof(struct iwl_rx_completion_desc_bz);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return sizeof(struct iwl_rx_completion_desc);

	return sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	int free_size = iwl_pcie_free_bd_size(trans);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  iwl_pcie_used_bd_size(trans) *
				  rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;
}
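/*
 * A worked sizing example for the coherent rings freed above and
 * allocated below, with illustrative values only: a multi-queue device
 * using plain __le64 free-list entries and a 512-entry queue needs
 * free_size * queue_size = 8 * 512 = 4096 bytes for the free BD ring,
 * plus iwl_pcie_used_bd_size(trans) * 512 bytes for the used
 * (completion) ring, whose entry size depends on the device family.
 */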
static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
			      sizeof(struct iwl_rb_status);

	spin_lock_init(&rxq->lock);
	if (trans->trans_cfg->mq_rx_supported)
		rxq->queue_size = trans->cfg->num_rbds;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->trans_cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  iwl_pcie_used_bd_size(trans) *
						  rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;
	size_t rb_stts_size = trans->trans_cfg->device_family >=
			      IWL_DEVICE_FAMILY_AX210 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
				      sizeof(trans_pcie->rx_pool[0]),
				      GFP_KERNEL);
	trans_pcie->global_table =
		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
			sizeof(trans_pcie->global_table[0]),
			GFP_KERNEL);
	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
	    !trans_pcie->global_table) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues contiguously (HW requirement).
	 */
	trans_pcie->base_rb_stts =
		dma_alloc_coherent(trans->dev,
				   rb_stts_size * trans->num_rx_queues,
				   &trans_pcie->base_rb_stts_dma,
				   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rx_pool);
	trans_pcie->rx_pool = NULL;
	kfree(trans_pcie->global_table);
	trans_pcie->global_table = NULL;
	kfree(trans_pcie->rxq);
	trans_pcie->rxq = NULL;

	return ret;
}
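/*
 * A worked example of the contiguous status-buffer layout set up above,
 * with illustrative values only: on an AX210-family device rb_stts_size
 * is sizeof(__le16) == 2, so queue 0 reads its status at offset 0,
 * queue 1 at offset 2 and queue 3 at offset 6 within the single
 * base_rb_stts allocation; each queue's rb_stts_dma is
 * base_rb_stts_dma plus the same offset.
 */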
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 * the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->trans_cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
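/*
 * A worked example of the queue-enable mask built above, with
 * illustrative values only: with num_rx_queues == 2 the loop sets
 * enabled = BIT(0) | BIT(16) | BIT(1) | BIT(17) = 0x00030003 before
 * writing it to RFH_RXF_RXQ_ACTIVE; each queue appears to get one
 * enable bit in the low half of the register paired with one in the
 * high half.
 */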
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);

static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret;

	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
	trans = trans_pcie->trans;

	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);

	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
		      rxq->id, ret, budget);

	if (ret < budget) {
		spin_lock(&trans_pcie->irq_lock);
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);

		napi_complete_done(&rxq->napi, ret);
	}

	return ret;
}

static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret;

	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
	trans = trans_pcie->trans;

	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
		      budget);

	if (ret < budget) {
		int irq_line = rxq->id;

		/* FIRST_RSS is shared with line 0 */
		if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
		    rxq->id == 1)
			irq_line = 0;

		spin_lock(&trans_pcie->irq_lock);
		iwl_pcie_clear_irq(trans, irq_line);
		spin_unlock(&trans_pcie->irq_lock);

		napi_complete_done(&rxq->napi, ret);
	}

	return ret;
}

void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	if (unlikely(!trans_pcie->rxq))
		return;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq && rxq->napi.poll)
			napi_synchronize(&rxq->napi);
	}
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock_bh(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock_bh(&rba->lock);

	/* free all first - we overwrite everything here */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_bh(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
11133681021fSJohannes Berg memset(rxq->rb_stts, 0, 11143681021fSJohannes Berg (trans->trans_cfg->device_family >= 11153681021fSJohannes Berg IWL_DEVICE_FAMILY_AX210) ? 11160307c839SGolan Ben Ami sizeof(__le16) : sizeof(struct iwl_rb_status)); 111778485054SSara Sharon 111878485054SSara Sharon iwl_pcie_rx_init_rxb_lists(rxq); 111978485054SSara Sharon 1120295d4cd8SJiri Kosina spin_unlock_bh(&rxq->lock); 1121295d4cd8SJiri Kosina 112225edc8f2SJohannes Berg if (!rxq->napi.poll) { 112325edc8f2SJohannes Berg int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; 112425edc8f2SJohannes Berg 11252b616666SMordechay Goodstein if (trans_pcie->msix_enabled) 112625edc8f2SJohannes Berg poll = iwl_pcie_napi_poll_msix; 112725edc8f2SJohannes Berg 1128bce97731SSara Sharon netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, 1129b48b89f9SJakub Kicinski poll); 113025edc8f2SJohannes Berg napi_enable(&rxq->napi); 113125edc8f2SJohannes Berg } 1132bce97731SSara Sharon 113378485054SSara Sharon } 113478485054SSara Sharon 113596a6497bSSara Sharon /* move the pool to the default queue and allocator ownerships */ 1136286ca8ebSLuca Coelho queue_size = trans->trans_cfg->mq_rx_supported ? 1137c042f0c7SJohannes Berg trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; 113896a6497bSSara Sharon allocator_pool_size = trans->num_rx_queues * 113996a6497bSSara Sharon (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); 11407b542436SSara Sharon num_alloc = queue_size + allocator_pool_size; 1141c042f0c7SJohannes Berg 11427b542436SSara Sharon for (i = 0; i < num_alloc; i++) { 114396a6497bSSara Sharon struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; 114496a6497bSSara Sharon 114596a6497bSSara Sharon if (i < allocator_pool_size) 114696a6497bSSara Sharon list_add(&rxb->list, &rba->rbd_empty); 114796a6497bSSara Sharon else 114896a6497bSSara Sharon list_add(&rxb->list, &def_rxq->rx_used); 114996a6497bSSara Sharon trans_pcie->global_table[i] = rxb; 1150e25d65f2SSara Sharon rxb->vid = (u16)(i + 1); 1151b1753c62SSara Sharon rxb->invalid = true; 115296a6497bSSara Sharon } 115378485054SSara Sharon 115478485054SSara Sharon iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); 11552047fa54SSara Sharon 1156eda50cdeSSara Sharon return 0; 1157eda50cdeSSara Sharon } 1158eda50cdeSSara Sharon 1159eda50cdeSSara Sharon int iwl_pcie_rx_init(struct iwl_trans *trans) 1160eda50cdeSSara Sharon { 1161eda50cdeSSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1162eda50cdeSSara Sharon int ret = _iwl_pcie_rx_init(trans); 1163eda50cdeSSara Sharon 1164eda50cdeSSara Sharon if (ret) 1165eda50cdeSSara Sharon return ret; 1166eda50cdeSSara Sharon 1167286ca8ebSLuca Coelho if (trans->trans_cfg->mq_rx_supported) 1168bce97731SSara Sharon iwl_pcie_rx_mq_hw_init(trans); 11692047fa54SSara Sharon else 1170eda50cdeSSara Sharon iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); 11712047fa54SSara Sharon 1172eda50cdeSSara Sharon iwl_pcie_rxq_restock(trans, trans_pcie->rxq); 117378485054SSara Sharon 117447ef328cSIlan Peer spin_lock_bh(&trans_pcie->rxq->lock); 1175eda50cdeSSara Sharon iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); 117647ef328cSIlan Peer spin_unlock_bh(&trans_pcie->rxq->lock); 1177e705c121SKalle Valo 1178e705c121SKalle Valo return 0; 1179e705c121SKalle Valo } 1180e705c121SKalle Valo 1181eda50cdeSSara Sharon int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) 1182eda50cdeSSara Sharon { 1183e506b481SSara Sharon /* Set interrupt coalescing timer to default (2048 usecs) */ 1184e506b481SSara Sharon iwl_write8(trans, CSR_INT_COALESCING, 
IWL_HOST_INT_TIMEOUT_DEF); 1185e506b481SSara Sharon 1186eda50cdeSSara Sharon /* 1187eda50cdeSSara Sharon * We don't configure the RFH. 1188eda50cdeSSara Sharon * Restock will be done at alive, after firmware configured the RFH. 1189eda50cdeSSara Sharon */ 1190eda50cdeSSara Sharon return _iwl_pcie_rx_init(trans); 1191eda50cdeSSara Sharon } 1192eda50cdeSSara Sharon 1193e705c121SKalle Valo void iwl_pcie_rx_free(struct iwl_trans *trans) 1194e705c121SKalle Valo { 1195e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1196e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 119778485054SSara Sharon int i; 1198286ca8ebSLuca Coelho size_t rb_stts_size = trans->trans_cfg->device_family >= 11993681021fSJohannes Berg IWL_DEVICE_FAMILY_AX210 ? 12006cc6ba3aSTriebitz sizeof(__le16) : sizeof(struct iwl_rb_status); 1201e705c121SKalle Valo 120278485054SSara Sharon /* 120378485054SSara Sharon * if rxq is NULL, it means that nothing has been allocated, 120478485054SSara Sharon * exit now 120578485054SSara Sharon */ 120678485054SSara Sharon if (!trans_pcie->rxq) { 1207e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); 1208e705c121SKalle Valo return; 1209e705c121SKalle Valo } 1210e705c121SKalle Valo 1211e705c121SKalle Valo cancel_work_sync(&rba->rx_alloc); 1212e705c121SKalle Valo 121378485054SSara Sharon iwl_pcie_free_rbs_pool(trans); 1214e705c121SKalle Valo 12156cc6ba3aSTriebitz if (trans_pcie->base_rb_stts) { 12166cc6ba3aSTriebitz dma_free_coherent(trans->dev, 12176cc6ba3aSTriebitz rb_stts_size * trans->num_rx_queues, 12186cc6ba3aSTriebitz trans_pcie->base_rb_stts, 12196cc6ba3aSTriebitz trans_pcie->base_rb_stts_dma); 12206cc6ba3aSTriebitz trans_pcie->base_rb_stts = NULL; 12216cc6ba3aSTriebitz trans_pcie->base_rb_stts_dma = 0; 12226cc6ba3aSTriebitz } 12236cc6ba3aSTriebitz 122478485054SSara Sharon for (i = 0; i < trans->num_rx_queues; i++) { 122578485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 122678485054SSara Sharon 12271b493e30SGolan Ben Ami iwl_pcie_free_rxq_dma(trans, rxq); 1228bce97731SSara Sharon 122925edc8f2SJohannes Berg if (rxq->napi.poll) { 123025edc8f2SJohannes Berg napi_disable(&rxq->napi); 1231bce97731SSara Sharon netif_napi_del(&rxq->napi); 123296a6497bSSara Sharon } 123325edc8f2SJohannes Berg } 1234c042f0c7SJohannes Berg kfree(trans_pcie->rx_pool); 1235c042f0c7SJohannes Berg kfree(trans_pcie->global_table); 123678485054SSara Sharon kfree(trans_pcie->rxq); 1237cfdc20efSJohannes Berg 1238cfdc20efSJohannes Berg if (trans_pcie->alloc_page) 1239cfdc20efSJohannes Berg __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order); 1240e705c121SKalle Valo } 1241e705c121SKalle Valo 1242868a1e86SShaul Triebitz static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, 1243868a1e86SShaul Triebitz struct iwl_rb_allocator *rba) 1244868a1e86SShaul Triebitz { 1245868a1e86SShaul Triebitz spin_lock(&rba->lock); 1246868a1e86SShaul Triebitz list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1247868a1e86SShaul Triebitz spin_unlock(&rba->lock); 1248868a1e86SShaul Triebitz } 1249868a1e86SShaul Triebitz 1250e705c121SKalle Valo /* 1251e705c121SKalle Valo * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 1252e705c121SKalle Valo * 1253e705c121SKalle Valo * Called when a RBD can be reused. The RBD is transferred to the allocator. 
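 * (it goes to rxq->rx_used first and is spliced over to the
 * allocator's empty list in batches).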
1254e705c121SKalle Valo * When there are 2 empty RBDs - a request for allocation is posted
1255e705c121SKalle Valo */
1256e705c121SKalle Valo static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1257e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb,
1258e705c121SKalle Valo struct iwl_rxq *rxq, bool emergency)
1259e705c121SKalle Valo {
1260e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1261e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba;
1262e705c121SKalle Valo
1263e705c121SKalle Valo /* Move the RBD to the used list; it will be moved to the allocator
1264e705c121SKalle Valo * in batches before claiming or posting a request */
1265e705c121SKalle Valo list_add_tail(&rxb->list, &rxq->rx_used);
1266e705c121SKalle Valo
1267e705c121SKalle Valo if (unlikely(emergency))
1268e705c121SKalle Valo return;
1269e705c121SKalle Valo
1270e705c121SKalle Valo /* Count the allocator-owned RBDs */
1271e705c121SKalle Valo rxq->used_count++;
1272e705c121SKalle Valo
1273e705c121SKalle Valo /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
1274e705c121SKalle Valo * issue a request to the allocator. The modulo by RX_CLAIM_REQ_ALLOC
1275e705c121SKalle Valo * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
1276e705c121SKalle Valo * buffers earlier but still need to post another request.
1277e705c121SKalle Valo */
1278e705c121SKalle Valo if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1279e705c121SKalle Valo /* Move the 2 RBDs to the allocator's ownership.
1280e705c121SKalle Valo * The allocator has another 6 from the pool for the request completion */
1281868a1e86SShaul Triebitz iwl_pcie_rx_move_to_allocator(rxq, rba);
1282e705c121SKalle Valo
1283e705c121SKalle Valo atomic_inc(&rba->req_pending);
1284e705c121SKalle Valo queue_work(rba->alloc_wq, &rba->rx_alloc);
1285e705c121SKalle Valo }
1286e705c121SKalle Valo }
1287e705c121SKalle Valo
1288e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
128978485054SSara Sharon struct iwl_rxq *rxq,
1290e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb,
12917891965dSSara Sharon bool emergency,
12927891965dSSara Sharon int i)
1293e705c121SKalle Valo {
1294e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
12954f4822b7SMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1296e705c121SKalle Valo bool page_stolen = false;
129780084e35SJohannes Berg int max_len = trans_pcie->rx_buf_bytes;
1298e705c121SKalle Valo u32 offset = 0;
1299e705c121SKalle Valo
1300e705c121SKalle Valo if (WARN_ON(!rxb))
1301e705c121SKalle Valo return;
1302e705c121SKalle Valo
1303e705c121SKalle Valo dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1304e705c121SKalle Valo
1305e705c121SKalle Valo while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1306e705c121SKalle Valo struct iwl_rx_packet *pkt;
1307e705c121SKalle Valo bool reclaim;
1308e4475583SJohannes Berg int len;
1309e705c121SKalle Valo struct iwl_rx_cmd_buffer rxcb = {
1310cfdc20efSJohannes Berg ._offset = rxb->offset + offset,
1311e705c121SKalle Valo ._rx_page_order = trans_pcie->rx_page_order,
1312e705c121SKalle Valo ._page = rxb->page,
1313e705c121SKalle Valo ._page_stolen = false,
1314e705c121SKalle Valo .truesize = max_len,
1315e705c121SKalle Valo };
1316e705c121SKalle Valo
1317e705c121SKalle Valo pkt = rxb_addr(&rxcb);
1318e705c121SKalle Valo
13193bfdee76SJohannes Berg if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
13203bfdee76SJohannes Berg IWL_DEBUG_RX(trans, 13213bfdee76SJohannes Berg "Q %d: RB end marker at offset %d\n", 13223bfdee76SJohannes Berg rxq->id, offset); 1323e705c121SKalle Valo break; 13243bfdee76SJohannes Berg } 1325e705c121SKalle Valo 1326a395058eSJohannes Berg WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1327a395058eSJohannes Berg FH_RSCSR_RXQ_POS != rxq->id, 1328a395058eSJohannes Berg "frame on invalid queue - is on %d and indicates %d\n", 1329a395058eSJohannes Berg rxq->id, 1330a395058eSJohannes Berg (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1331a395058eSJohannes Berg FH_RSCSR_RXQ_POS); 1332ab2e696bSSara Sharon 1333e705c121SKalle Valo IWL_DEBUG_RX(trans, 13343bfdee76SJohannes Berg "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", 13353bfdee76SJohannes Berg rxq->id, offset, 133639bdb17eSSharon Dvir iwl_get_cmd_string(trans, 1337f0c86427SJohannes Berg WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), 133835177c99SSara Sharon pkt->hdr.group_id, pkt->hdr.cmd, 133935177c99SSara Sharon le16_to_cpu(pkt->hdr.sequence)); 1340e705c121SKalle Valo 1341e705c121SKalle Valo len = iwl_rx_packet_len(pkt); 1342e705c121SKalle Valo len += sizeof(u32); /* account for status word */ 1343df72138dSJohannes Berg 1344df72138dSJohannes Berg offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1345df72138dSJohannes Berg 1346df72138dSJohannes Berg /* check that what the device tells us made sense */ 1347f1658dcbSAndrei Otcheretianski if (len < sizeof(*pkt) || offset > max_len) 1348df72138dSJohannes Berg break; 1349df72138dSJohannes Berg 1350e705c121SKalle Valo trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 1351e705c121SKalle Valo trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 1352e705c121SKalle Valo 1353e705c121SKalle Valo /* Reclaim a command buffer only if this packet is a response 1354e705c121SKalle Valo * to a (driver-originated) command. 1355e705c121SKalle Valo * If the packet (e.g. Rx frame) originated from uCode, 1356e705c121SKalle Valo * there is no command buffer to reclaim. 1357e705c121SKalle Valo * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1358e705c121SKalle Valo * but apparently a few don't get set; catch them here. */ 1359e705c121SKalle Valo reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 1360d8a130b0SJohannes Berg if (reclaim && !pkt->hdr.group_id) { 1361e705c121SKalle Valo int i; 1362e705c121SKalle Valo 1363e705c121SKalle Valo for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 1364e705c121SKalle Valo if (trans_pcie->no_reclaim_cmds[i] == 1365e705c121SKalle Valo pkt->hdr.cmd) { 1366e705c121SKalle Valo reclaim = false; 1367e705c121SKalle Valo break; 1368e705c121SKalle Valo } 1369e705c121SKalle Valo } 1370e705c121SKalle Valo } 1371e705c121SKalle Valo 13729416560eSGolan Ben Ami if (rxq->id == trans_pcie->def_rx_queue) 1373bce97731SSara Sharon iwl_op_mode_rx(trans->op_mode, &rxq->napi, 1374bce97731SSara Sharon &rxcb); 1375bce97731SSara Sharon else 1376bce97731SSara Sharon iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 1377bce97731SSara Sharon &rxcb, rxq->id); 1378e705c121SKalle Valo 1379e705c121SKalle Valo /* 1380e705c121SKalle Valo * After here, we should always check rxcb._page_stolen, 1381e705c121SKalle Valo * if it is true then one of the handlers took the page. 
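 * In that case the page is neither remapped for the device nor
 * returned to rx_free below.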
1382e705c121SKalle Valo */ 1383e705c121SKalle Valo 1384e705c121SKalle Valo if (reclaim) { 1385e4475583SJohannes Berg u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1386e4475583SJohannes Berg int index = SEQ_TO_INDEX(sequence); 1387e4475583SJohannes Berg int cmd_index = iwl_txq_get_cmd_index(txq, index); 1388e4475583SJohannes Berg 1389e4475583SJohannes Berg kfree_sensitive(txq->entries[cmd_index].free_buf); 1390e4475583SJohannes Berg txq->entries[cmd_index].free_buf = NULL; 1391e4475583SJohannes Berg 1392e705c121SKalle Valo /* Invoke any callbacks, transfer the buffer to caller, 1393e705c121SKalle Valo * and fire off the (possibly) blocking 1394e705c121SKalle Valo * iwl_trans_send_cmd() 1395e705c121SKalle Valo * as we reclaim the driver command queue */ 1396e705c121SKalle Valo if (!rxcb._page_stolen) 1397e705c121SKalle Valo iwl_pcie_hcmd_complete(trans, &rxcb); 1398e705c121SKalle Valo else 1399e705c121SKalle Valo IWL_WARN(trans, "Claim null rxb?\n"); 1400e705c121SKalle Valo } 1401e705c121SKalle Valo 1402e705c121SKalle Valo page_stolen |= rxcb._page_stolen; 14033681021fSJohannes Berg if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 14040307c839SGolan Ben Ami break; 1405e705c121SKalle Valo } 1406e705c121SKalle Valo 1407e705c121SKalle Valo /* page was stolen from us -- free our reference */ 1408e705c121SKalle Valo if (page_stolen) { 1409e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 1410e705c121SKalle Valo rxb->page = NULL; 1411e705c121SKalle Valo } 1412e705c121SKalle Valo 1413e705c121SKalle Valo /* Reuse the page if possible. For notification packets and 1414e705c121SKalle Valo * SKBs that fail to Rx correctly, add them back into the 1415e705c121SKalle Valo * rx_free list for reuse later. */ 1416e705c121SKalle Valo if (rxb->page != NULL) { 1417e705c121SKalle Valo rxb->page_dma = 1418cfdc20efSJohannes Berg dma_map_page(trans->dev, rxb->page, rxb->offset, 141980084e35SJohannes Berg trans_pcie->rx_buf_bytes, 1420e705c121SKalle Valo DMA_FROM_DEVICE); 1421e705c121SKalle Valo if (dma_mapping_error(trans->dev, rxb->page_dma)) { 1422e705c121SKalle Valo /* 1423e705c121SKalle Valo * free the page(s) as well to not break 1424e705c121SKalle Valo * the invariant that the items on the used 1425e705c121SKalle Valo * list have no page(s) 1426e705c121SKalle Valo */ 1427e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 1428e705c121SKalle Valo rxb->page = NULL; 1429e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1430e705c121SKalle Valo } else { 1431e705c121SKalle Valo list_add_tail(&rxb->list, &rxq->rx_free); 1432e705c121SKalle Valo rxq->free_count++; 1433e705c121SKalle Valo } 1434e705c121SKalle Valo } else 1435e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1436e705c121SKalle Valo } 1437e705c121SKalle Valo 14381b4bbe8bSSara Sharon static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, 1439b1c860f6SJohannes Berg struct iwl_rxq *rxq, int i, 1440b1c860f6SJohannes Berg bool *join) 14411b4bbe8bSSara Sharon { 14421b4bbe8bSSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 14431b4bbe8bSSara Sharon struct iwl_rx_mem_buffer *rxb; 14441b4bbe8bSSara Sharon u16 vid; 14451b4bbe8bSSara Sharon 1446f826faaaSJohannes Berg BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); 14475d19e208SJohannes Berg BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); 1448f826faaaSJohannes Berg 1449286ca8ebSLuca Coelho if (!trans->trans_cfg->mq_rx_supported) { 
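/* no completion descriptors without MQ RX - the driver's own
 * queue[] shadow says which RBD sits at index i
 */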
14501b4bbe8bSSara Sharon rxb = rxq->queue[i]; 14511b4bbe8bSSara Sharon rxq->queue[i] = NULL; 14521b4bbe8bSSara Sharon return rxb; 14531b4bbe8bSSara Sharon } 14541b4bbe8bSSara Sharon 14555d19e208SJohannes Berg if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 14565d19e208SJohannes Berg struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; 14575d19e208SJohannes Berg 14585d19e208SJohannes Berg vid = le16_to_cpu(cd[i].rbid); 14595d19e208SJohannes Berg *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; 14605d19e208SJohannes Berg } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 14615d19e208SJohannes Berg struct iwl_rx_completion_desc *cd = rxq->used_bd; 14625d19e208SJohannes Berg 14635d19e208SJohannes Berg vid = le16_to_cpu(cd[i].rbid); 14645d19e208SJohannes Berg *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; 1465b1c860f6SJohannes Berg } else { 14665d19e208SJohannes Berg __le32 *cd = rxq->used_bd; 14675d19e208SJohannes Berg 14685d19e208SJohannes Berg vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */ 1469b1c860f6SJohannes Berg } 14701b4bbe8bSSara Sharon 1471c042f0c7SJohannes Berg if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) 14721b4bbe8bSSara Sharon goto out_err; 14731b4bbe8bSSara Sharon 14741b4bbe8bSSara Sharon rxb = trans_pcie->global_table[vid - 1]; 14751b4bbe8bSSara Sharon if (rxb->invalid) 14761b4bbe8bSSara Sharon goto out_err; 14771b4bbe8bSSara Sharon 147885d78bb1SSara Sharon IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); 147985d78bb1SSara Sharon 14801b4bbe8bSSara Sharon rxb->invalid = true; 14811b4bbe8bSSara Sharon 14821b4bbe8bSSara Sharon return rxb; 14831b4bbe8bSSara Sharon 14841b4bbe8bSSara Sharon out_err: 14851b4bbe8bSSara Sharon WARN(1, "Invalid rxb from HW %u\n", (u32)vid); 14861b4bbe8bSSara Sharon iwl_force_nmi(trans); 14871b4bbe8bSSara Sharon return NULL; 14881b4bbe8bSSara Sharon } 14891b4bbe8bSSara Sharon 1490e705c121SKalle Valo /* 1491e705c121SKalle Valo * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1492e705c121SKalle Valo */ 149325edc8f2SJohannes Berg static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget) 1494e705c121SKalle Valo { 1495e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 149630f24eabSJohannes Berg struct iwl_rxq *rxq; 149725edc8f2SJohannes Berg u32 r, i, count = 0, handled = 0; 1498e705c121SKalle Valo bool emergency = false; 1499e705c121SKalle Valo 150030f24eabSJohannes Berg if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) 150125edc8f2SJohannes Berg return budget; 150230f24eabSJohannes Berg 150330f24eabSJohannes Berg rxq = &trans_pcie->rxq[queue]; 150430f24eabSJohannes Berg 1505e705c121SKalle Valo restart: 1506e705c121SKalle Valo spin_lock(&rxq->lock); 1507e705c121SKalle Valo /* uCode's read index (stored in shared DRAM) indicates the last Rx 1508e705c121SKalle Valo * buffer that the driver may process (last buffer filled by ucode). 
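 * On newer devices this is a 12-bit "closed RB" count, hence the
 * 0x0FFF mask applied to the value read below.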
*/ 15090307c839SGolan Ben Ami r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 1510e705c121SKalle Valo i = rxq->read; 1511e705c121SKalle Valo 15125eae443eSSara Sharon /* W/A 9000 device step A0 wrap-around bug */ 15135eae443eSSara Sharon r &= (rxq->queue_size - 1); 15145eae443eSSara Sharon 1515e705c121SKalle Valo /* Rx interrupt, but nothing sent from uCode */ 1516e705c121SKalle Valo if (i == r) 15175eae443eSSara Sharon IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); 1518e705c121SKalle Valo 151925edc8f2SJohannes Berg while (i != r && ++handled < budget) { 1520868a1e86SShaul Triebitz struct iwl_rb_allocator *rba = &trans_pcie->rba; 1521e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb; 1522868a1e86SShaul Triebitz /* number of RBDs still waiting for page allocation */ 1523868a1e86SShaul Triebitz u32 rb_pending_alloc = 1524868a1e86SShaul Triebitz atomic_read(&trans_pcie->rba.req_pending) * 1525868a1e86SShaul Triebitz RX_CLAIM_REQ_ALLOC; 1526b1c860f6SJohannes Berg bool join = false; 1527e705c121SKalle Valo 1528868a1e86SShaul Triebitz if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && 1529868a1e86SShaul Triebitz !emergency)) { 1530868a1e86SShaul Triebitz iwl_pcie_rx_move_to_allocator(rxq, rba); 1531e705c121SKalle Valo emergency = true; 15326dcdd165SSara Sharon IWL_DEBUG_TPT(trans, 15336dcdd165SSara Sharon "RX path is in emergency. Pending allocations %d\n", 15346dcdd165SSara Sharon rb_pending_alloc); 1535868a1e86SShaul Triebitz } 1536e705c121SKalle Valo 153785d78bb1SSara Sharon IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); 153885d78bb1SSara Sharon 1539b1c860f6SJohannes Berg rxb = iwl_pcie_get_rxb(trans, rxq, i, &join); 15401b4bbe8bSSara Sharon if (!rxb) 15415eae443eSSara Sharon goto out; 1542e705c121SKalle Valo 1543b1c860f6SJohannes Berg if (unlikely(join || rxq->next_rb_is_fragment)) { 1544b1c860f6SJohannes Berg rxq->next_rb_is_fragment = join; 1545b1c860f6SJohannes Berg /* 1546b1c860f6SJohannes Berg * We can only get a multi-RB in the following cases: 1547b1c860f6SJohannes Berg * - firmware issue, sending a too big notification 1548b1c860f6SJohannes Berg * - sniffer mode with a large A-MSDU 1549b1c860f6SJohannes Berg * - large MTU frames (>2k) 1550b1c860f6SJohannes Berg * since the multi-RB functionality is limited to newer 1551b1c860f6SJohannes Berg * hardware that cannot put multiple entries into a 1552b1c860f6SJohannes Berg * single RB. 1553b1c860f6SJohannes Berg * 1554b1c860f6SJohannes Berg * Right now, the higher layers aren't set up to deal 1555b1c860f6SJohannes Berg * with that, so discard all of these. 1556b1c860f6SJohannes Berg */ 1557b1c860f6SJohannes Berg list_add_tail(&rxb->list, &rxq->rx_free); 1558b1c860f6SJohannes Berg rxq->free_count++; 1559b1c860f6SJohannes Berg } else { 15607891965dSSara Sharon iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); 1561b1c860f6SJohannes Berg } 1562e705c121SKalle Valo 156396a6497bSSara Sharon i = (i + 1) & (rxq->queue_size - 1); 1564e705c121SKalle Valo 1565d56daea4SSara Sharon /* 1566d56daea4SSara Sharon * If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1567d56daea4SSara Sharon * try to claim the pre-allocated buffers from the allocator. 1568d56daea4SSara Sharon * If not ready - will try to reclaim next time. 
1569d56daea4SSara Sharon * There is no need to reschedule work - allocator exits only 1570d56daea4SSara Sharon * on success 1571e705c121SKalle Valo */ 1572d56daea4SSara Sharon if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) 1573d56daea4SSara Sharon iwl_pcie_rx_allocator_get(trans, rxq); 1574e705c121SKalle Valo 1575d56daea4SSara Sharon if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { 1576d56daea4SSara Sharon /* Add the remaining empty RBDs for allocator use */ 1577868a1e86SShaul Triebitz iwl_pcie_rx_move_to_allocator(rxq, rba); 1578d56daea4SSara Sharon } else if (emergency) { 1579e705c121SKalle Valo count++; 1580e705c121SKalle Valo if (count == 8) { 1581e705c121SKalle Valo count = 0; 15826dcdd165SSara Sharon if (rb_pending_alloc < rxq->queue_size / 3) { 15836dcdd165SSara Sharon IWL_DEBUG_TPT(trans, 15846dcdd165SSara Sharon "RX path exited emergency. Pending allocations %d\n", 15856dcdd165SSara Sharon rb_pending_alloc); 1586e705c121SKalle Valo emergency = false; 15876dcdd165SSara Sharon } 1588e0e168dcSGregory Greenman 1589e705c121SKalle Valo rxq->read = i; 1590e705c121SKalle Valo spin_unlock(&rxq->lock); 1591e0e168dcSGregory Greenman iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 159278485054SSara Sharon iwl_pcie_rxq_restock(trans, rxq); 1593e705c121SKalle Valo goto restart; 1594e705c121SKalle Valo } 1595e705c121SKalle Valo } 1596e0e168dcSGregory Greenman } 15975eae443eSSara Sharon out: 1598e705c121SKalle Valo /* Backtrack one entry */ 1599e705c121SKalle Valo rxq->read = i; 1600e705c121SKalle Valo spin_unlock(&rxq->lock); 1601e705c121SKalle Valo 1602e705c121SKalle Valo /* 1603e705c121SKalle Valo * handle a case where in emergency there are some unallocated RBDs. 1604e705c121SKalle Valo * those RBDs are in the used list, but are not tracked by the queue's 1605e705c121SKalle Valo * used_count which counts allocator owned RBDs. 1606e705c121SKalle Valo * unallocated emergency RBDs must be allocated on exit, otherwise 1607e705c121SKalle Valo * when called again the function may not be in emergency mode and 1608e705c121SKalle Valo * they will be handed to the allocator with no tracking in the RBD 1609e705c121SKalle Valo * allocator counters, which will lead to them never being claimed back 1610e705c121SKalle Valo * by the queue. 1611e705c121SKalle Valo * by allocating them here, they are now in the queue free list, and 1612e705c121SKalle Valo * will be restocked by the next call of iwl_pcie_rxq_restock. 1613e705c121SKalle Valo */ 1614e705c121SKalle Valo if (unlikely(emergency && count)) 161578485054SSara Sharon iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1616e705c121SKalle Valo 1617e0e168dcSGregory Greenman iwl_pcie_rxq_restock(trans, rxq); 161825edc8f2SJohannes Berg 161925edc8f2SJohannes Berg return handled; 1620e705c121SKalle Valo } 1621e705c121SKalle Valo 16222e5d4a8fSHaim Dreyfuss static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) 16232e5d4a8fSHaim Dreyfuss { 16242e5d4a8fSHaim Dreyfuss u8 queue = entry->entry; 16252e5d4a8fSHaim Dreyfuss struct msix_entry *entries = entry - queue; 16262e5d4a8fSHaim Dreyfuss 16272e5d4a8fSHaim Dreyfuss return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); 16282e5d4a8fSHaim Dreyfuss } 16292e5d4a8fSHaim Dreyfuss 16302e5d4a8fSHaim Dreyfuss /* 16312e5d4a8fSHaim Dreyfuss * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw 16322e5d4a8fSHaim Dreyfuss * This interrupt handler should be used with RSS queue only. 
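 * The MSI-X vector index (entry->entry) doubles as the RX queue
 * index, which is why it is bounds-checked against
 * trans->num_rx_queues below.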
16332e5d4a8fSHaim Dreyfuss */ 16342e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) 16352e5d4a8fSHaim Dreyfuss { 16362e5d4a8fSHaim Dreyfuss struct msix_entry *entry = dev_id; 16372e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 16382e5d4a8fSHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans; 1639*1902f195SAnjaneyulu struct iwl_rxq *rxq; 16402e5d4a8fSHaim Dreyfuss 1641c42ff65dSJohannes Berg trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); 1642c42ff65dSJohannes Berg 16435eae443eSSara Sharon if (WARN_ON(entry->entry >= trans->num_rx_queues)) 16445eae443eSSara Sharon return IRQ_NONE; 16455eae443eSSara Sharon 1646*1902f195SAnjaneyulu if (!trans_pcie->rxq) { 164791ca9c3aSEmmanuel Grumbach if (net_ratelimit()) 164891ca9c3aSEmmanuel Grumbach IWL_ERR(trans, 164991ca9c3aSEmmanuel Grumbach "[%d] Got MSI-X interrupt before we have Rx queues\n", 165091ca9c3aSEmmanuel Grumbach entry->entry); 1651abc599efSEmmanuel Grumbach return IRQ_NONE; 165291ca9c3aSEmmanuel Grumbach } 1653abc599efSEmmanuel Grumbach 1654*1902f195SAnjaneyulu rxq = &trans_pcie->rxq[entry->entry]; 16552e5d4a8fSHaim Dreyfuss lock_map_acquire(&trans->sync_cmd_lockdep_map); 16569d401222SMordechay Goodstein IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); 16572e5d4a8fSHaim Dreyfuss 16582e5d4a8fSHaim Dreyfuss local_bh_disable(); 165925edc8f2SJohannes Berg if (napi_schedule_prep(&rxq->napi)) 166025edc8f2SJohannes Berg __napi_schedule(&rxq->napi); 166125edc8f2SJohannes Berg else 166225edc8f2SJohannes Berg iwl_pcie_clear_irq(trans, entry->entry); 16632e5d4a8fSHaim Dreyfuss local_bh_enable(); 16642e5d4a8fSHaim Dreyfuss 16652e5d4a8fSHaim Dreyfuss lock_map_release(&trans->sync_cmd_lockdep_map); 16662e5d4a8fSHaim Dreyfuss 16672e5d4a8fSHaim Dreyfuss return IRQ_HANDLED; 16682e5d4a8fSHaim Dreyfuss } 16692e5d4a8fSHaim Dreyfuss 1670e705c121SKalle Valo /* 1671e705c121SKalle Valo * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1672e705c121SKalle Valo */ 1673e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1674e705c121SKalle Valo { 1675e705c121SKalle Valo int i; 1676e705c121SKalle Valo 1677e705c121SKalle Valo /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1678e705c121SKalle Valo if (trans->cfg->internal_wimax_coex && 1679e705c121SKalle Valo !trans->cfg->apmg_not_supported && 1680e705c121SKalle Valo (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1681e705c121SKalle Valo APMS_CLK_VAL_MRB_FUNC_MODE) || 1682e705c121SKalle Valo (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1683e705c121SKalle Valo APMG_PS_CTRL_VAL_RESET_REQ))) { 1684e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1685e705c121SKalle Valo iwl_op_mode_wimax_active(trans->op_mode); 168613f028b4SMordechay Goodstein wake_up(&trans->wait_command_queue); 1687e705c121SKalle Valo return; 1688e705c121SKalle Valo } 1689e705c121SKalle Valo 1690286ca8ebSLuca Coelho for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 16914f4822b7SMordechay Goodstein if (!trans->txqs.txq[i]) 169213a3a390SSara Sharon continue; 16934f4822b7SMordechay Goodstein del_timer(&trans->txqs.txq[i]->stuck_timer); 169413a3a390SSara Sharon } 1695e705c121SKalle Valo 16967d75f32eSEmmanuel Grumbach /* The STATUS_FW_ERROR bit is set in this function. This must happen 16977d75f32eSEmmanuel Grumbach * before we wake up the command caller, to ensure a proper cleanup. 
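 * Otherwise the waiter could wake up without seeing that the
 * firmware has crashed.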
*/
1698b8221b0fSJohannes Berg iwl_trans_fw_error(trans, false);
16997d75f32eSEmmanuel Grumbach
1700e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
170113f028b4SMordechay Goodstein wake_up(&trans->wait_command_queue);
1702e705c121SKalle Valo }
1703e705c121SKalle Valo
1704e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1705e705c121SKalle Valo {
1706e705c121SKalle Valo u32 inta;
1707e705c121SKalle Valo
1708e705c121SKalle Valo lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1709e705c121SKalle Valo
1710e705c121SKalle Valo trace_iwlwifi_dev_irq(trans->dev);
1711e705c121SKalle Valo
1712e705c121SKalle Valo /* Discover which interrupts are active/pending */
1713e705c121SKalle Valo inta = iwl_read32(trans, CSR_INT);
1714e705c121SKalle Valo
1715e705c121SKalle Valo /* the thread will service interrupts and re-enable them */
1716e705c121SKalle Valo return inta;
1717e705c121SKalle Valo }
1718e705c121SKalle Valo
1719e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */
1720e705c121SKalle Valo #define ICT_SHIFT 12
1721e705c121SKalle Valo #define ICT_SIZE (1 << ICT_SHIFT)
1722e705c121SKalle Valo #define ICT_COUNT (ICT_SIZE / sizeof(u32))
1723e705c121SKalle Valo
1724e705c121SKalle Valo /* Interrupt handler using the ICT table. With this mechanism the driver
1725e705c121SKalle Valo * stops reading the INTA register to get the device's interrupts, as
1726e705c121SKalle Valo * reading that register is expensive. Instead, the device writes the
1727e705c121SKalle Valo * interrupt causes into the ICT DRAM table, increments its index and
1728e705c121SKalle Valo * fires the interrupt. The driver ORs all ICT table entries from the
1729e705c121SKalle Valo * current index up to the first entry holding 0; the result is the
1730e705c121SKalle Valo * interrupt to service. The entries are then zeroed and the index updated.
1731e705c121SKalle Valo */
1732e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1733e705c121SKalle Valo {
1734e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1735e705c121SKalle Valo u32 inta;
1736e705c121SKalle Valo u32 val = 0;
1737e705c121SKalle Valo u32 read;
1738e705c121SKalle Valo
1739e705c121SKalle Valo trace_iwlwifi_dev_irq(trans->dev);
1740e705c121SKalle Valo
1741e705c121SKalle Valo /* Ignore interrupt if there's nothing in NIC to service.
1742e705c121SKalle Valo * This may be due to IRQ shared with another device,
1743e705c121SKalle Valo * or due to sporadic interrupts thrown from our NIC. */
1744e705c121SKalle Valo read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1745e705c121SKalle Valo trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1746e705c121SKalle Valo if (!read)
1747e705c121SKalle Valo return 0;
1748e705c121SKalle Valo
1749e705c121SKalle Valo /*
1750e705c121SKalle Valo * Collect all entries up to the first 0, starting from ict_index;
1751e705c121SKalle Valo * note we already read at ict_index.
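 * The index advance wraps via the (ICT_COUNT - 1) mask, the table
 * being a single 4096-byte page of ICT_COUNT 32-bit entries.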
1752e705c121SKalle Valo */
1753e705c121SKalle Valo do {
1754e705c121SKalle Valo val |= read;
1755e705c121SKalle Valo IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1756e705c121SKalle Valo trans_pcie->ict_index, read);
1757e705c121SKalle Valo trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1758e705c121SKalle Valo trans_pcie->ict_index =
1759e705c121SKalle Valo ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1760e705c121SKalle Valo
1761e705c121SKalle Valo read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1762e705c121SKalle Valo trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1763e705c121SKalle Valo read);
1764e705c121SKalle Valo } while (read);
1765e705c121SKalle Valo
1766e705c121SKalle Valo /* We should not get this value, just ignore it. */
1767e705c121SKalle Valo if (val == 0xffffffff)
1768e705c121SKalle Valo val = 0;
1769e705c121SKalle Valo
1770e705c121SKalle Valo /*
1771e705c121SKalle Valo * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1772e705c121SKalle Valo * (bit 15 before shifting it to 31) to clear when using interrupt
1773e705c121SKalle Valo * coalescing. fortunately, bits 18 and 19 stay set when this happens
1774e705c121SKalle Valo * so we use them to decide on the real state of the Rx bit.
1775e705c121SKalle Valo * In other words, bit 15 is set if bit 18 or bit 19 is set.
1776e705c121SKalle Valo */
1777e705c121SKalle Valo if (val & 0xC0000)
1778e705c121SKalle Valo val |= 0x8000;
1779e705c121SKalle Valo
1780e705c121SKalle Valo inta = (0xff & val) | ((0xff00 & val) << 16);
1781e705c121SKalle Valo return inta;
1782e705c121SKalle Valo }
1783e705c121SKalle Valo
1784fa4de7f7SJohannes Berg void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
17853a6e168bSJohannes Berg {
17863a6e168bSJohannes Berg struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
17873a6e168bSJohannes Berg struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1788326477e4SJohannes Berg bool hw_rfkill, prev, report;
17893a6e168bSJohannes Berg
17903a6e168bSJohannes Berg mutex_lock(&trans_pcie->mutex);
1791326477e4SJohannes Berg prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
17923a6e168bSJohannes Berg hw_rfkill = iwl_is_rfkill_set(trans);
1793326477e4SJohannes Berg if (hw_rfkill) {
1794326477e4SJohannes Berg set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1795326477e4SJohannes Berg set_bit(STATUS_RFKILL_HW, &trans->status);
1796326477e4SJohannes Berg }
1797326477e4SJohannes Berg if (trans_pcie->opmode_down)
1798326477e4SJohannes Berg report = hw_rfkill;
1799326477e4SJohannes Berg else
1800326477e4SJohannes Berg report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
18013a6e168bSJohannes Berg
18023a6e168bSJohannes Berg IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
18033a6e168bSJohannes Berg hw_rfkill ?
"disable radio" : "enable radio"); 18043a6e168bSJohannes Berg 18053a6e168bSJohannes Berg isr_stats->rfkill++; 18063a6e168bSJohannes Berg 1807326477e4SJohannes Berg if (prev != report) 1808326477e4SJohannes Berg iwl_trans_pcie_rf_kill(trans, report); 18093a6e168bSJohannes Berg mutex_unlock(&trans_pcie->mutex); 18103a6e168bSJohannes Berg 18113a6e168bSJohannes Berg if (hw_rfkill) { 18123a6e168bSJohannes Berg if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 18133a6e168bSJohannes Berg &trans->status)) 18143a6e168bSJohannes Berg IWL_DEBUG_RF_KILL(trans, 18153a6e168bSJohannes Berg "Rfkill while SYNC HCMD in flight\n"); 181613f028b4SMordechay Goodstein wake_up(&trans->wait_command_queue); 18173a6e168bSJohannes Berg } else { 1818326477e4SJohannes Berg clear_bit(STATUS_RFKILL_HW, &trans->status); 1819326477e4SJohannes Berg if (trans_pcie->opmode_down) 1820326477e4SJohannes Berg clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 18213a6e168bSJohannes Berg } 18223a6e168bSJohannes Berg } 18233a6e168bSJohannes Berg 1824e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 1825e705c121SKalle Valo { 1826e705c121SKalle Valo struct iwl_trans *trans = dev_id; 1827e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1828e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1829e705c121SKalle Valo u32 inta = 0; 1830e705c121SKalle Valo u32 handled = 0; 183125edc8f2SJohannes Berg bool polling = false; 1832e705c121SKalle Valo 1833e705c121SKalle Valo lock_map_acquire(&trans->sync_cmd_lockdep_map); 1834e705c121SKalle Valo 183525edc8f2SJohannes Berg spin_lock_bh(&trans_pcie->irq_lock); 1836e705c121SKalle Valo 1837e705c121SKalle Valo /* dram interrupt table not set yet, 1838e705c121SKalle Valo * use legacy interrupt. 1839e705c121SKalle Valo */ 1840e705c121SKalle Valo if (likely(trans_pcie->use_ict)) 1841e705c121SKalle Valo inta = iwl_pcie_int_cause_ict(trans); 1842e705c121SKalle Valo else 1843e705c121SKalle Valo inta = iwl_pcie_int_cause_non_ict(trans); 1844e705c121SKalle Valo 1845e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) { 1846e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1847e705c121SKalle Valo "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", 1848e705c121SKalle Valo inta, trans_pcie->inta_mask, 1849e705c121SKalle Valo iwl_read32(trans, CSR_INT_MASK), 1850e705c121SKalle Valo iwl_read32(trans, CSR_FH_INT_STATUS)); 1851e705c121SKalle Valo if (inta & (~trans_pcie->inta_mask)) 1852e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1853e705c121SKalle Valo "We got a masked interrupt (0x%08x)\n", 1854e705c121SKalle Valo inta & (~trans_pcie->inta_mask)); 1855e705c121SKalle Valo } 1856e705c121SKalle Valo 1857e705c121SKalle Valo inta &= trans_pcie->inta_mask; 1858e705c121SKalle Valo 1859e705c121SKalle Valo /* 1860e705c121SKalle Valo * Ignore interrupt if there's nothing in NIC to service. 1861e705c121SKalle Valo * This may be due to IRQ shared with another device, 1862e705c121SKalle Valo * or due to sporadic interrupts thrown from our NIC. 
1863e705c121SKalle Valo */ 1864e705c121SKalle Valo if (unlikely(!inta)) { 1865e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1866e705c121SKalle Valo /* 1867e705c121SKalle Valo * Re-enable interrupts here since we don't 1868e705c121SKalle Valo * have anything to service 1869e705c121SKalle Valo */ 1870e705c121SKalle Valo if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1871f16c3ebfSEmmanuel Grumbach _iwl_enable_interrupts(trans); 187225edc8f2SJohannes Berg spin_unlock_bh(&trans_pcie->irq_lock); 1873e705c121SKalle Valo lock_map_release(&trans->sync_cmd_lockdep_map); 1874e705c121SKalle Valo return IRQ_NONE; 1875e705c121SKalle Valo } 1876e705c121SKalle Valo 1877d4f1a50cSJohannes Berg if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) { 1878e705c121SKalle Valo /* 1879e705c121SKalle Valo * Hardware disappeared. It might have 1880e705c121SKalle Valo * already raised an interrupt. 1881e705c121SKalle Valo */ 1882e705c121SKalle Valo IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 188325edc8f2SJohannes Berg spin_unlock_bh(&trans_pcie->irq_lock); 1884e705c121SKalle Valo goto out; 1885e705c121SKalle Valo } 1886e705c121SKalle Valo 1887e705c121SKalle Valo /* Ack/clear/reset pending uCode interrupts. 1888e705c121SKalle Valo * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1889e705c121SKalle Valo */ 1890e705c121SKalle Valo /* There is a hardware bug in the interrupt mask function that some 1891e705c121SKalle Valo * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if 1892e705c121SKalle Valo * they are disabled in the CSR_INT_MASK register. Furthermore the 1893e705c121SKalle Valo * ICT interrupt handling mechanism has another bug that might cause 1894e705c121SKalle Valo * these unmasked interrupts fail to be detected. We workaround the 1895e705c121SKalle Valo * hardware bugs here by ACKing all the possible interrupts so that 1896e705c121SKalle Valo * interrupt coalescing can still be achieved. 1897e705c121SKalle Valo */ 1898e705c121SKalle Valo iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1899e705c121SKalle Valo 1900e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) 1901e705c121SKalle Valo IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1902e705c121SKalle Valo inta, iwl_read32(trans, CSR_INT_MASK)); 1903e705c121SKalle Valo 190425edc8f2SJohannes Berg spin_unlock_bh(&trans_pcie->irq_lock); 1905e705c121SKalle Valo 1906e705c121SKalle Valo /* Now service all interrupt bits discovered above. */ 1907e705c121SKalle Valo if (inta & CSR_INT_BIT_HW_ERR) { 1908e705c121SKalle Valo IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); 1909e705c121SKalle Valo 1910e705c121SKalle Valo /* Tell the device to stop sending interrupts */ 1911e705c121SKalle Valo iwl_disable_interrupts(trans); 1912e705c121SKalle Valo 1913e705c121SKalle Valo isr_stats->hw++; 1914e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1915e705c121SKalle Valo 1916e705c121SKalle Valo handled |= CSR_INT_BIT_HW_ERR; 1917e705c121SKalle Valo 1918e705c121SKalle Valo goto out; 1919e705c121SKalle Valo } 1920e705c121SKalle Valo 1921e705c121SKalle Valo /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1922e705c121SKalle Valo if (inta & CSR_INT_BIT_SCD) { 1923e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1924e705c121SKalle Valo "Scheduler finished to transmit the frame/frames.\n"); 1925e705c121SKalle Valo isr_stats->sch++; 1926e705c121SKalle Valo } 1927e705c121SKalle Valo 1928e705c121SKalle Valo /* Alive notification via Rx interrupt will do the real work */ 1929e705c121SKalle Valo if (inta & CSR_INT_BIT_ALIVE) { 1930e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1931e705c121SKalle Valo isr_stats->alive++; 1932286ca8ebSLuca Coelho if (trans->trans_cfg->gen2) { 1933eda50cdeSSara Sharon /* 1934eda50cdeSSara Sharon * We can restock, since firmware configured 1935eda50cdeSSara Sharon * the RFH 1936eda50cdeSSara Sharon */ 1937eda50cdeSSara Sharon iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 1938eda50cdeSSara Sharon } 1939ed3e4c6dSEmmanuel Grumbach 1940ed3e4c6dSEmmanuel Grumbach handled |= CSR_INT_BIT_ALIVE; 1941e705c121SKalle Valo } 1942e705c121SKalle Valo 1943e705c121SKalle Valo /* Safely ignore these bits for debug checks below */ 1944e705c121SKalle Valo inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1945e705c121SKalle Valo 1946e705c121SKalle Valo /* HW RF KILL switch toggled */ 1947e705c121SKalle Valo if (inta & CSR_INT_BIT_RF_KILL) { 19483a6e168bSJohannes Berg iwl_pcie_handle_rfkill_irq(trans); 1949e705c121SKalle Valo handled |= CSR_INT_BIT_RF_KILL; 1950e705c121SKalle Valo } 1951e705c121SKalle Valo 1952e705c121SKalle Valo /* Chip got too hot and stopped itself */ 1953e705c121SKalle Valo if (inta & CSR_INT_BIT_CT_KILL) { 1954e705c121SKalle Valo IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1955e705c121SKalle Valo isr_stats->ctkill++; 1956e705c121SKalle Valo handled |= CSR_INT_BIT_CT_KILL; 1957e705c121SKalle Valo } 1958e705c121SKalle Valo 1959e705c121SKalle Valo /* Error detected by uCode */ 1960e705c121SKalle Valo if (inta & CSR_INT_BIT_SW_ERR) { 1961e705c121SKalle Valo IWL_ERR(trans, "Microcode SW error detected. 
" 1962e705c121SKalle Valo " Restarting 0x%X.\n", inta); 1963e705c121SKalle Valo isr_stats->sw++; 1964e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1965e705c121SKalle Valo handled |= CSR_INT_BIT_SW_ERR; 1966e705c121SKalle Valo } 1967e705c121SKalle Valo 1968e705c121SKalle Valo /* uCode wakes up after power-down sleep */ 1969e705c121SKalle Valo if (inta & CSR_INT_BIT_WAKEUP) { 1970e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1971e705c121SKalle Valo iwl_pcie_rxq_check_wrptr(trans); 1972e705c121SKalle Valo iwl_pcie_txq_check_wrptrs(trans); 1973e705c121SKalle Valo 1974e705c121SKalle Valo isr_stats->wakeup++; 1975e705c121SKalle Valo 1976e705c121SKalle Valo handled |= CSR_INT_BIT_WAKEUP; 1977e705c121SKalle Valo } 1978e705c121SKalle Valo 1979e705c121SKalle Valo /* All uCode command responses, including Tx command responses, 1980e705c121SKalle Valo * Rx "responses" (frame-received notification), and other 1981e705c121SKalle Valo * notifications from uCode come through here*/ 1982e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1983e705c121SKalle Valo CSR_INT_BIT_RX_PERIODIC)) { 1984e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1985e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1986e705c121SKalle Valo handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1987e705c121SKalle Valo iwl_write32(trans, CSR_FH_INT_STATUS, 1988e705c121SKalle Valo CSR_FH_INT_RX_MASK); 1989e705c121SKalle Valo } 1990e705c121SKalle Valo if (inta & CSR_INT_BIT_RX_PERIODIC) { 1991e705c121SKalle Valo handled |= CSR_INT_BIT_RX_PERIODIC; 1992e705c121SKalle Valo iwl_write32(trans, 1993e705c121SKalle Valo CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1994e705c121SKalle Valo } 1995e705c121SKalle Valo /* Sending RX interrupt require many steps to be done in the 1996195a367eSXiang wangx * device: 1997e705c121SKalle Valo * 1- write interrupt to current index in ICT table. 1998e705c121SKalle Valo * 2- dma RX frame. 1999e705c121SKalle Valo * 3- update RX shared data to indicate last write index. 2000e705c121SKalle Valo * 4- send interrupt. 2001e705c121SKalle Valo * This could lead to RX race, driver could receive RX interrupt 2002e705c121SKalle Valo * but the shared data changes does not reflect this; 2003e705c121SKalle Valo * periodic interrupt will detect any dangling Rx activity. 2004e705c121SKalle Valo */ 2005e705c121SKalle Valo 2006e705c121SKalle Valo /* Disable periodic interrupt; we use it as just a one-shot. */ 2007e705c121SKalle Valo iwl_write8(trans, CSR_INT_PERIODIC_REG, 2008e705c121SKalle Valo CSR_INT_PERIODIC_DIS); 2009e705c121SKalle Valo 2010e705c121SKalle Valo /* 2011e705c121SKalle Valo * Enable periodic interrupt in 8 msec only if we received 2012e705c121SKalle Valo * real RX interrupt (instead of just periodic int), to catch 2013e705c121SKalle Valo * any dangling Rx interrupt. If it was just the periodic 2014e705c121SKalle Valo * interrupt, there was no dangling Rx activity, and no need 2015e705c121SKalle Valo * to extend the periodic interrupt; one-shot is enough. 
2016e705c121SKalle Valo */ 2017e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 2018e705c121SKalle Valo iwl_write8(trans, CSR_INT_PERIODIC_REG, 2019e705c121SKalle Valo CSR_INT_PERIODIC_ENA); 2020e705c121SKalle Valo 2021e705c121SKalle Valo isr_stats->rx++; 2022e705c121SKalle Valo 2023e705c121SKalle Valo local_bh_disable(); 202425edc8f2SJohannes Berg if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { 202525edc8f2SJohannes Berg polling = true; 202625edc8f2SJohannes Berg __napi_schedule(&trans_pcie->rxq[0].napi); 202725edc8f2SJohannes Berg } 2028e705c121SKalle Valo local_bh_enable(); 2029e705c121SKalle Valo } 2030e705c121SKalle Valo 2031e705c121SKalle Valo /* This "Tx" DMA channel is used only for loading uCode */ 2032e705c121SKalle Valo if (inta & CSR_INT_BIT_FH_TX) { 2033e705c121SKalle Valo iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 2034e705c121SKalle Valo IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 2035e705c121SKalle Valo isr_stats->tx++; 2036e705c121SKalle Valo handled |= CSR_INT_BIT_FH_TX; 2037e705c121SKalle Valo /* Wake up uCode load routine, now that load is complete */ 2038e705c121SKalle Valo trans_pcie->ucode_write_complete = true; 2039e705c121SKalle Valo wake_up(&trans_pcie->ucode_write_waitq); 2040c0941aceSMukesh Sisodiya /* Wake up IMR write routine, now that write to SRAM is complete */ 2041c0941aceSMukesh Sisodiya if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2042c0941aceSMukesh Sisodiya trans_pcie->imr_status = IMR_D2S_COMPLETED; 2043c0941aceSMukesh Sisodiya wake_up(&trans_pcie->ucode_write_waitq); 2044c0941aceSMukesh Sisodiya } 2045e705c121SKalle Valo } 2046e705c121SKalle Valo 2047e705c121SKalle Valo if (inta & ~handled) { 2048e705c121SKalle Valo IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 2049e705c121SKalle Valo isr_stats->unhandled++; 2050e705c121SKalle Valo } 2051e705c121SKalle Valo 2052e705c121SKalle Valo if (inta & ~(trans_pcie->inta_mask)) { 2053e705c121SKalle Valo IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 2054e705c121SKalle Valo inta & ~trans_pcie->inta_mask); 2055e705c121SKalle Valo } 2056e705c121SKalle Valo 205725edc8f2SJohannes Berg if (!polling) { 205825edc8f2SJohannes Berg spin_lock_bh(&trans_pcie->irq_lock); 2059a6bd005fSEmmanuel Grumbach /* only Re-enable all interrupt if disabled by irq */ 2060f16c3ebfSEmmanuel Grumbach if (test_bit(STATUS_INT_ENABLED, &trans->status)) 2061f16c3ebfSEmmanuel Grumbach _iwl_enable_interrupts(trans); 2062f16c3ebfSEmmanuel Grumbach /* we are loading the firmware, enable FH_TX interrupt only */ 2063f16c3ebfSEmmanuel Grumbach else if (handled & CSR_INT_BIT_FH_TX) 2064f16c3ebfSEmmanuel Grumbach iwl_enable_fw_load_int(trans); 2065e705c121SKalle Valo /* Re-enable RF_KILL if it occurred */ 2066e705c121SKalle Valo else if (handled & CSR_INT_BIT_RF_KILL) 2067e705c121SKalle Valo iwl_enable_rfkill_int(trans); 2068ed3e4c6dSEmmanuel Grumbach /* Re-enable the ALIVE / Rx interrupt if it occurred */ 2069ed3e4c6dSEmmanuel Grumbach else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) 2070ed3e4c6dSEmmanuel Grumbach iwl_enable_fw_load_int_ctx_info(trans); 207125edc8f2SJohannes Berg spin_unlock_bh(&trans_pcie->irq_lock); 207225edc8f2SJohannes Berg } 2073e705c121SKalle Valo 2074e705c121SKalle Valo out: 2075e705c121SKalle Valo lock_map_release(&trans->sync_cmd_lockdep_map); 2076e705c121SKalle Valo return IRQ_HANDLED; 2077e705c121SKalle Valo } 2078e705c121SKalle Valo 2079e705c121SKalle Valo 
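/*
 * Illustrative ICT walk (example values, not taken from hardware):
 * with ict_index = 5 and table contents [5] = 0x0002, [6] = 0x0800,
 * [7] = 0, the loop in iwl_pcie_int_cause_ict() ORs
 * 0x0002 | 0x0800 = 0x0802, zeroes entries 5 and 6 and leaves
 * ict_index at 7. The low/high bytes are then spread back into
 * CSR_INT bit positions:
 *
 *	inta = (0xff & val) | ((0xff00 & val) << 16);
 *
 * so val = 0x0802 yields inta = 0x08000002.
 */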
/******************************************************************************
2080e705c121SKalle Valo *
2081e705c121SKalle Valo * ICT functions
2082e705c121SKalle Valo *
2083e705c121SKalle Valo ******************************************************************************/
2084e705c121SKalle Valo
2085e705c121SKalle Valo /* Free the DRAM table */
2086e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans)
2087e705c121SKalle Valo {
2088e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2089e705c121SKalle Valo
2090e705c121SKalle Valo if (trans_pcie->ict_tbl) {
2091e705c121SKalle Valo dma_free_coherent(trans->dev, ICT_SIZE,
2092e705c121SKalle Valo trans_pcie->ict_tbl,
2093e705c121SKalle Valo trans_pcie->ict_tbl_dma);
2094e705c121SKalle Valo trans_pcie->ict_tbl = NULL;
2095e705c121SKalle Valo trans_pcie->ict_tbl_dma = 0;
2096e705c121SKalle Valo }
2097e705c121SKalle Valo }
2098e705c121SKalle Valo
2099e705c121SKalle Valo /*
2100e705c121SKalle Valo * Allocate the shared DRAM table: an aligned memory
2101e705c121SKalle Valo * block of ICT_SIZE bytes.
2102e705c121SKalle Valo * Also reset all data related to ICT table interrupts.
2103e705c121SKalle Valo */
2104e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2105e705c121SKalle Valo {
2106e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2107e705c121SKalle Valo
2108e705c121SKalle Valo trans_pcie->ict_tbl =
2109750afb08SLuis Chamberlain dma_alloc_coherent(trans->dev, ICT_SIZE,
2110750afb08SLuis Chamberlain &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2111e705c121SKalle Valo if (!trans_pcie->ict_tbl)
2112e705c121SKalle Valo return -ENOMEM;
2113e705c121SKalle Valo
2114e705c121SKalle Valo /* just an API sanity check ... it is guaranteed to be aligned */
2115e705c121SKalle Valo if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2116e705c121SKalle Valo iwl_pcie_free_ict(trans);
2117e705c121SKalle Valo return -EINVAL;
2118e705c121SKalle Valo }
2119e705c121SKalle Valo
2120e705c121SKalle Valo return 0;
2121e705c121SKalle Valo }
2122e705c121SKalle Valo
2123e705c121SKalle Valo /* Device is going up: inform it that we are using the ICT interrupt
2124e705c121SKalle Valo * table, and have the driver-side code start using ICT interrupts.
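 * The table's DMA address (shifted right by ICT_SHIFT) together with
 * the enable flags is programmed into CSR_DRAM_INT_TBL_REG below.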
2125e705c121SKalle Valo */ 2126e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans) 2127e705c121SKalle Valo { 2128e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2129e705c121SKalle Valo u32 val; 2130e705c121SKalle Valo 2131e705c121SKalle Valo if (!trans_pcie->ict_tbl) 2132e705c121SKalle Valo return; 2133e705c121SKalle Valo 213425edc8f2SJohannes Berg spin_lock_bh(&trans_pcie->irq_lock); 2135f16c3ebfSEmmanuel Grumbach _iwl_disable_interrupts(trans); 2136e705c121SKalle Valo 2137e705c121SKalle Valo memset(trans_pcie->ict_tbl, 0, ICT_SIZE); 2138e705c121SKalle Valo 2139e705c121SKalle Valo val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; 2140e705c121SKalle Valo 2141e705c121SKalle Valo val |= CSR_DRAM_INT_TBL_ENABLE | 2142e705c121SKalle Valo CSR_DRAM_INIT_TBL_WRAP_CHECK | 2143e705c121SKalle Valo CSR_DRAM_INIT_TBL_WRITE_POINTER; 2144e705c121SKalle Valo 2145e705c121SKalle Valo IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); 2146e705c121SKalle Valo 2147e705c121SKalle Valo iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); 2148e705c121SKalle Valo trans_pcie->use_ict = true; 2149e705c121SKalle Valo trans_pcie->ict_index = 0; 2150e705c121SKalle Valo iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); 2151f16c3ebfSEmmanuel Grumbach _iwl_enable_interrupts(trans); 215225edc8f2SJohannes Berg spin_unlock_bh(&trans_pcie->irq_lock); 2153e705c121SKalle Valo } 2154e705c121SKalle Valo 2155e705c121SKalle Valo /* Device is going down disable ict interrupt usage */ 2156e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans) 2157e705c121SKalle Valo { 2158e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2159e705c121SKalle Valo 216025edc8f2SJohannes Berg spin_lock_bh(&trans_pcie->irq_lock); 2161e705c121SKalle Valo trans_pcie->use_ict = false; 216225edc8f2SJohannes Berg spin_unlock_bh(&trans_pcie->irq_lock); 2163e705c121SKalle Valo } 2164e705c121SKalle Valo 2165e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data) 2166e705c121SKalle Valo { 2167e705c121SKalle Valo struct iwl_trans *trans = data; 2168e705c121SKalle Valo 2169e705c121SKalle Valo if (!trans) 2170e705c121SKalle Valo return IRQ_NONE; 2171e705c121SKalle Valo 2172e705c121SKalle Valo /* Disable (but don't clear!) interrupts here to avoid 2173e705c121SKalle Valo * back-to-back ISRs and sporadic interrupts from our NIC. 2174e705c121SKalle Valo * If we have something to service, the tasklet will re-enable ints. 2175e705c121SKalle Valo * If we *don't* have something, we'll re-enable before leaving here. 
2176e705c121SKalle Valo */
2177e705c121SKalle Valo iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2178e705c121SKalle Valo
2179e705c121SKalle Valo return IRQ_WAKE_THREAD;
2180e705c121SKalle Valo }
21812e5d4a8fSHaim Dreyfuss
21822e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
21832e5d4a8fSHaim Dreyfuss {
21842e5d4a8fSHaim Dreyfuss return IRQ_WAKE_THREAD;
21852e5d4a8fSHaim Dreyfuss }
21862e5d4a8fSHaim Dreyfuss
21872e5d4a8fSHaim Dreyfuss irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
21882e5d4a8fSHaim Dreyfuss {
21892e5d4a8fSHaim Dreyfuss struct msix_entry *entry = dev_id;
21902e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
21912e5d4a8fSHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans;
219246167a8fSColin Ian King struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2193d4626f91SMordechay Goodstein u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
21942e5d4a8fSHaim Dreyfuss u32 inta_fh, inta_hw;
219525edc8f2SJohannes Berg bool polling = false;
2196571836a0SMike Golant bool sw_err;
21972e5d4a8fSHaim Dreyfuss
2198d4626f91SMordechay Goodstein if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2199d4626f91SMordechay Goodstein inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
2200d4626f91SMordechay Goodstein
2201d4626f91SMordechay Goodstein if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2202d4626f91SMordechay Goodstein inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
2203d4626f91SMordechay Goodstein
22042e5d4a8fSHaim Dreyfuss lock_map_acquire(&trans->sync_cmd_lockdep_map);
22052e5d4a8fSHaim Dreyfuss
220625edc8f2SJohannes Berg spin_lock_bh(&trans_pcie->irq_lock);
22077ef3dd26SHaim Dreyfuss inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
22087ef3dd26SHaim Dreyfuss inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
22092e5d4a8fSHaim Dreyfuss /*
22102e5d4a8fSHaim Dreyfuss * Clear the causes registers to avoid handling the same cause twice.
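 * RX data-queue causes are deliberately left out of the mask; each
 * queue's cause is acked from its NAPI poll via iwl_pcie_clear_irq().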
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
	u32 inta_fh, inta_hw;
	bool polling = false;
	bool sw_err;

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_bh(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause
	 * again on the next interrupt.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock_bh(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      entry->entry, inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}

	inta_fh &= trans_pcie->fh_mask;

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[0].napi);
		}
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[1].napi);
		}
		local_bh_enable();
	}
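
	/*
	 * The napi_schedule_prep()/__napi_schedule() split (rather than
	 * a plain napi_schedule()) lets us record in "polling" whether a
	 * poll was actually scheduled; if so, re-arming the vector is
	 * deferred to the poll routine. A sketch of the completion side
	 * -- illustrative only, assuming a poll routine shaped like:
	 *
	 *	static int poll(struct napi_struct *napi, int budget)
	 *	{
	 *		int done = ...handle RX, at most "budget" frames...;
	 *
	 *		if (done < budget && napi_complete_done(napi, done))
	 *			iwl_pcie_clear_irq(trans, vector);
	 *		return done;
	 *	}
	 *
	 * where "vector" is the index of the MSI-X entry being re-armed.
	 */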
	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
		isr_stats->tx++;

		/* Wake up the IMR routine once the write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up the uCode load routine,
		 * now that the load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);

		/* Wake up the IMR routine once the write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	}
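
	/*
	 * The wake_up() calls above pair with a sleeping waiter in the
	 * firmware-load path, along the lines of (sketch; the timeout
	 * value is illustrative):
	 *
	 *	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
	 *				 trans_pcie->ucode_write_complete,
	 *				 5 * HZ);
	 *	if (!ret)
	 *		return -ETIMEDOUT;
	 */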
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
	else
		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		/* During the FW reset flow, errors are reported from there */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_ERROR;
			wake_up(&trans_pcie->imr_waitq);
		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
			trans_pcie->fw_reset_state = FW_RESET_ERROR;
			wake_up(&trans_pcie->fw_reset_waitq);
		} else {
			iwl_pcie_irq_handle_error(trans);
		}
	}

	/* After checking the FH register, check the HW register */
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      entry->entry, inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}

	inta_hw &= trans_pcie->hw_mask;

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->trans_cfg->gen2) {
			/* We can restock, since the firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	/*
	 * In some rare cases when the HW is in a bad state, we may
	 * get this interrupt too early, when prph_info is still NULL.
	 * So make sure that it's not NULL to prevent crashing.
	 */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
		u32 sleep_notif =
			le32_to_cpu(trans_pcie->prph_info->sleep_notif);

		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
			IWL_DEBUG_ISR(trans,
				      "Sx interrupt: sleep notification = 0x%x\n",
				      sleep_notif);
			trans_pcie->sx_complete = true;
			wake_up(&trans_pcie->sx_waitq);
		} else {
			/* uCode wakes up after power-down sleep */
			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
			iwl_pcie_rxq_check_wrptr(trans);
			iwl_pcie_txq_check_wrptrs(trans);

			isr_stats->wakeup++;
		}
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		trans->dbg.hw_error = true;
		iwl_pcie_irq_handle_error(trans);
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
		IWL_DEBUG_ISR(trans, "Reset flow completed\n");
		trans_pcie->fw_reset_state = FW_RESET_OK;
		wake_up(&trans_pcie->fw_reset_waitq);
	}

	if (!polling)
		iwl_pcie_clear_irq(trans, entry->entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
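
/*
 * Teardown mirrors the MSI-X setup sketched above
 * iwl_pcie_irq_msix_handler() -- illustrative only, assuming the
 * vector count is tracked in "alloc_vecs" as the allocation sketch
 * implies:
 *
 *	for (i = 0; i < trans_pcie->alloc_vecs; i++)
 *		free_irq(pci_irq_vector(pdev, i),
 *			 &trans_pcie->msix_entries[i]);
 *	pci_free_irq_vectors(pdev);
 */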