1e705c121SKalle Valo /****************************************************************************** 2e705c121SKalle Valo * 3e705c121SKalle Valo * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4e705c121SKalle Valo * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5e705c121SKalle Valo * 6e705c121SKalle Valo * Portions of this file are derived from the ipw3945 project, as well 7e705c121SKalle Valo * as portions of the ieee80211 subsystem header files. 8e705c121SKalle Valo * 9e705c121SKalle Valo * This program is free software; you can redistribute it and/or modify it 10e705c121SKalle Valo * under the terms of version 2 of the GNU General Public License as 11e705c121SKalle Valo * published by the Free Software Foundation. 12e705c121SKalle Valo * 13e705c121SKalle Valo * This program is distributed in the hope that it will be useful, but WITHOUT 14e705c121SKalle Valo * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15e705c121SKalle Valo * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16e705c121SKalle Valo * more details. 17e705c121SKalle Valo * 18e705c121SKalle Valo * You should have received a copy of the GNU General Public License along with 19e705c121SKalle Valo * this program; if not, write to the Free Software Foundation, Inc., 20e705c121SKalle Valo * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 21e705c121SKalle Valo * 22e705c121SKalle Valo * The full GNU General Public License is included in this distribution in the 23e705c121SKalle Valo * file called LICENSE. 24e705c121SKalle Valo * 25e705c121SKalle Valo * Contact Information: 26d01c5366SEmmanuel Grumbach * Intel Linux Wireless <linuxwifi@intel.com> 27e705c121SKalle Valo * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 28e705c121SKalle Valo * 29e705c121SKalle Valo *****************************************************************************/ 30e705c121SKalle Valo #include <linux/sched.h> 31e705c121SKalle Valo #include <linux/wait.h> 32e705c121SKalle Valo #include <linux/gfp.h> 33e705c121SKalle Valo 34e705c121SKalle Valo #include "iwl-prph.h" 35e705c121SKalle Valo #include "iwl-io.h" 36e705c121SKalle Valo #include "internal.h" 37e705c121SKalle Valo #include "iwl-op-mode.h" 38e705c121SKalle Valo 39e705c121SKalle Valo /****************************************************************************** 40e705c121SKalle Valo * 41e705c121SKalle Valo * RX path functions 42e705c121SKalle Valo * 43e705c121SKalle Valo ******************************************************************************/ 44e705c121SKalle Valo 45e705c121SKalle Valo /* 46e705c121SKalle Valo * Rx theory of operation 47e705c121SKalle Valo * 48e705c121SKalle Valo * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 49e705c121SKalle Valo * each of which point to Receive Buffers to be filled by the NIC. These get 50e705c121SKalle Valo * used not only for Rx frames, but for any command response or notification 51e705c121SKalle Valo * from the NIC. The driver and NIC manage the Rx buffers by means 52e705c121SKalle Valo * of indexes into the circular buffer. 53e705c121SKalle Valo * 54e705c121SKalle Valo * Rx Queue Indexes 55e705c121SKalle Valo * The host/firmware share two index registers for managing the Rx buffers. 56e705c121SKalle Valo * 57e705c121SKalle Valo * The READ index maps to the first position that the firmware may be writing 58e705c121SKalle Valo * to -- the driver can read up to (but not including) this position and get 59e705c121SKalle Valo * good data. 60e705c121SKalle Valo * The READ index is managed by the firmware once the card is enabled. 61e705c121SKalle Valo * 62e705c121SKalle Valo * The WRITE index maps to the last position the driver has read from -- the 63e705c121SKalle Valo * position preceding WRITE is the last slot the firmware can place a packet. 64e705c121SKalle Valo * 65e705c121SKalle Valo * The queue is empty (no good data) if WRITE = READ - 1, and is full if 66e705c121SKalle Valo * WRITE = READ. 67e705c121SKalle Valo * 68e705c121SKalle Valo * During initialization, the host sets up the READ queue position to the first 69e705c121SKalle Valo * INDEX position, and WRITE to the last (READ - 1 wrapped) 70e705c121SKalle Valo * 71e705c121SKalle Valo * When the firmware places a packet in a buffer, it will advance the READ index 72e705c121SKalle Valo * and fire the RX interrupt. The driver can then query the READ index and 73e705c121SKalle Valo * process as many packets as possible, moving the WRITE index forward as it 74e705c121SKalle Valo * resets the Rx queue buffers with new memory. 75e705c121SKalle Valo * 76e705c121SKalle Valo * The management in the driver is as follows: 77e705c121SKalle Valo * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 78e705c121SKalle Valo * When the interrupt handler is called, the request is processed. 79e705c121SKalle Valo * The page is either stolen - transferred to the upper layer 80e705c121SKalle Valo * or reused - added immediately to the iwl->rxq->rx_free list. 81e705c121SKalle Valo * + When the page is stolen - the driver updates the matching queue's used 82e705c121SKalle Valo * count, detaches the RBD and transfers it to the queue used list. 83e705c121SKalle Valo * When there are two used RBDs - they are transferred to the allocator empty 84e705c121SKalle Valo * list. Work is then scheduled for the allocator to start allocating 85e705c121SKalle Valo * eight buffers. 86e705c121SKalle Valo * When there are another 6 used RBDs - they are transferred to the allocator 87e705c121SKalle Valo * empty list and the driver tries to claim the pre-allocated buffers and 88e705c121SKalle Valo * add them to iwl->rxq->rx_free. If it fails - it continues to claim them 89e705c121SKalle Valo * until ready. 90e705c121SKalle Valo * When there are 8+ buffers in the free list - either from allocation or from 91e705c121SKalle Valo * 8 reused unstolen pages - restock is called to update the FW and indexes. 92e705c121SKalle Valo * + In order to make sure the allocator always has RBDs to use for allocation 93e705c121SKalle Valo * the allocator has initial pool in the size of num_queues*(8-2) - the 94e705c121SKalle Valo * maximum missing RBDs per allocation request (request posted with 2 95e705c121SKalle Valo * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). 96e705c121SKalle Valo * The queues supplies the recycle of the rest of the RBDs. 97e705c121SKalle Valo * + A received packet is processed and handed to the kernel network stack, 98e705c121SKalle Valo * detached from the iwl->rxq. The driver 'processed' index is updated. 99e705c121SKalle Valo * + If there are no allocated buffers in iwl->rxq->rx_free, 100e705c121SKalle Valo * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 101e705c121SKalle Valo * If there were enough free buffers and RX_STALLED is set it is cleared. 102e705c121SKalle Valo * 103e705c121SKalle Valo * 104e705c121SKalle Valo * Driver sequence: 105e705c121SKalle Valo * 106e705c121SKalle Valo * iwl_rxq_alloc() Allocates rx_free 107e705c121SKalle Valo * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 108e705c121SKalle Valo * iwl_pcie_rxq_restock. 109e705c121SKalle Valo * Used only during initialization. 110e705c121SKalle Valo * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 111e705c121SKalle Valo * queue, updates firmware pointers, and updates 112e705c121SKalle Valo * the WRITE index. 113e705c121SKalle Valo * iwl_pcie_rx_allocator() Background work for allocating pages. 114e705c121SKalle Valo * 115e705c121SKalle Valo * -- enable interrupts -- 116e705c121SKalle Valo * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 117e705c121SKalle Valo * READ INDEX, detaching the SKB from the pool. 118e705c121SKalle Valo * Moves the packet buffer from queue to rx_used. 119e705c121SKalle Valo * Posts and claims requests to the allocator. 120e705c121SKalle Valo * Calls iwl_pcie_rxq_restock to refill any empty 121e705c121SKalle Valo * slots. 122e705c121SKalle Valo * 123e705c121SKalle Valo * RBD life-cycle: 124e705c121SKalle Valo * 125e705c121SKalle Valo * Init: 126e705c121SKalle Valo * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue 127e705c121SKalle Valo * 128e705c121SKalle Valo * Regular Receive interrupt: 129e705c121SKalle Valo * Page Stolen: 130e705c121SKalle Valo * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> 131e705c121SKalle Valo * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue 132e705c121SKalle Valo * Page not Stolen: 133e705c121SKalle Valo * rxq.queue -> rxq.rx_free -> rxq.queue 134e705c121SKalle Valo * ... 135e705c121SKalle Valo * 136e705c121SKalle Valo */ 137e705c121SKalle Valo 138e705c121SKalle Valo /* 139e705c121SKalle Valo * iwl_rxq_space - Return number of free slots available in queue. 140e705c121SKalle Valo */ 141e705c121SKalle Valo static int iwl_rxq_space(const struct iwl_rxq *rxq) 142e705c121SKalle Valo { 143e705c121SKalle Valo /* Make sure RX_QUEUE_SIZE is a power of 2 */ 144e705c121SKalle Valo BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); 145e705c121SKalle Valo 146e705c121SKalle Valo /* 147e705c121SKalle Valo * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity 148e705c121SKalle Valo * between empty and completely full queues. 149e705c121SKalle Valo * The following is equivalent to modulo by RX_QUEUE_SIZE and is well 150e705c121SKalle Valo * defined for negative dividends. 151e705c121SKalle Valo */ 152e705c121SKalle Valo return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); 153e705c121SKalle Valo } 154e705c121SKalle Valo 155e705c121SKalle Valo /* 156e705c121SKalle Valo * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 157e705c121SKalle Valo */ 158e705c121SKalle Valo static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) 159e705c121SKalle Valo { 160e705c121SKalle Valo return cpu_to_le32((u32)(dma_addr >> 8)); 161e705c121SKalle Valo } 162e705c121SKalle Valo 163e705c121SKalle Valo /* 164e705c121SKalle Valo * iwl_pcie_rx_stop - stops the Rx DMA 165e705c121SKalle Valo */ 166e705c121SKalle Valo int iwl_pcie_rx_stop(struct iwl_trans *trans) 167e705c121SKalle Valo { 168e705c121SKalle Valo iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 169e705c121SKalle Valo return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, 170e705c121SKalle Valo FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 171e705c121SKalle Valo } 172e705c121SKalle Valo 173e705c121SKalle Valo /* 174e705c121SKalle Valo * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 175e705c121SKalle Valo */ 17678485054SSara Sharon static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 17778485054SSara Sharon struct iwl_rxq *rxq) 178e705c121SKalle Valo { 179e705c121SKalle Valo u32 reg; 180e705c121SKalle Valo 181e705c121SKalle Valo lockdep_assert_held(&rxq->lock); 182e705c121SKalle Valo 183e705c121SKalle Valo /* 184e705c121SKalle Valo * explicitly wake up the NIC if: 185e705c121SKalle Valo * 1. shadow registers aren't enabled 186e705c121SKalle Valo * 2. there is a chance that the NIC is asleep 187e705c121SKalle Valo */ 188e705c121SKalle Valo if (!trans->cfg->base_params->shadow_reg_enable && 189e705c121SKalle Valo test_bit(STATUS_TPOWER_PMI, &trans->status)) { 190e705c121SKalle Valo reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 191e705c121SKalle Valo 192e705c121SKalle Valo if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 193e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", 194e705c121SKalle Valo reg); 195e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, 196e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 197e705c121SKalle Valo rxq->need_update = true; 198e705c121SKalle Valo return; 199e705c121SKalle Valo } 200e705c121SKalle Valo } 201e705c121SKalle Valo 202e705c121SKalle Valo rxq->write_actual = round_down(rxq->write, 8); 203e705c121SKalle Valo iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 204e705c121SKalle Valo } 205e705c121SKalle Valo 206e705c121SKalle Valo static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) 207e705c121SKalle Valo { 208e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 20978485054SSara Sharon int i; 210e705c121SKalle Valo 21178485054SSara Sharon for (i = 0; i < trans->num_rx_queues; i++) { 21278485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 213e705c121SKalle Valo 214e705c121SKalle Valo if (!rxq->need_update) 21578485054SSara Sharon continue; 21678485054SSara Sharon spin_lock(&rxq->lock); 21778485054SSara Sharon iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 218e705c121SKalle Valo rxq->need_update = false; 219e705c121SKalle Valo spin_unlock(&rxq->lock); 220e705c121SKalle Valo } 22178485054SSara Sharon } 222e705c121SKalle Valo 223e705c121SKalle Valo /* 224e705c121SKalle Valo * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool 225e705c121SKalle Valo * 226e705c121SKalle Valo * If there are slots in the RX queue that need to be restocked, 227e705c121SKalle Valo * and we have free pre-allocated buffers, fill the ranks as much 228e705c121SKalle Valo * as we can, pulling from rx_free. 229e705c121SKalle Valo * 230e705c121SKalle Valo * This moves the 'write' index forward to catch up with 'processed', and 231e705c121SKalle Valo * also updates the memory address in the firmware to reference the new 232e705c121SKalle Valo * target buffer. 233e705c121SKalle Valo */ 23478485054SSara Sharon static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) 235e705c121SKalle Valo { 236e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb; 237e705c121SKalle Valo 238e705c121SKalle Valo /* 239e705c121SKalle Valo * If the device isn't enabled - not need to try to add buffers... 240e705c121SKalle Valo * This can happen when we stop the device and still have an interrupt 241e705c121SKalle Valo * pending. We stop the APM before we sync the interrupts because we 242e705c121SKalle Valo * have to (see comment there). On the other hand, since the APM is 243e705c121SKalle Valo * stopped, we cannot access the HW (in particular not prph). 244e705c121SKalle Valo * So don't try to restock if the APM has been already stopped. 245e705c121SKalle Valo */ 246e705c121SKalle Valo if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 247e705c121SKalle Valo return; 248e705c121SKalle Valo 249e705c121SKalle Valo spin_lock(&rxq->lock); 250e705c121SKalle Valo while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { 251e705c121SKalle Valo /* The overwritten rxb must be a used one */ 252e705c121SKalle Valo rxb = rxq->queue[rxq->write]; 253e705c121SKalle Valo BUG_ON(rxb && rxb->page); 254e705c121SKalle Valo 255e705c121SKalle Valo /* Get next free Rx buffer, remove from free list */ 256e705c121SKalle Valo rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 257e705c121SKalle Valo list); 258e705c121SKalle Valo list_del(&rxb->list); 259e705c121SKalle Valo 260e705c121SKalle Valo /* Point to Rx buffer via next RBD in circular buffer */ 261e705c121SKalle Valo rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); 262e705c121SKalle Valo rxq->queue[rxq->write] = rxb; 263e705c121SKalle Valo rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 264e705c121SKalle Valo rxq->free_count--; 265e705c121SKalle Valo } 266e705c121SKalle Valo spin_unlock(&rxq->lock); 267e705c121SKalle Valo 268e705c121SKalle Valo /* If we've added more space for the firmware to place data, tell it. 269e705c121SKalle Valo * Increment device's write pointer in multiples of 8. */ 270e705c121SKalle Valo if (rxq->write_actual != (rxq->write & ~0x7)) { 271e705c121SKalle Valo spin_lock(&rxq->lock); 27278485054SSara Sharon iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 273e705c121SKalle Valo spin_unlock(&rxq->lock); 274e705c121SKalle Valo } 275e705c121SKalle Valo } 276e705c121SKalle Valo 277e705c121SKalle Valo /* 278e705c121SKalle Valo * iwl_pcie_rx_alloc_page - allocates and returns a page. 279e705c121SKalle Valo * 280e705c121SKalle Valo */ 281e705c121SKalle Valo static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, 282e705c121SKalle Valo gfp_t priority) 283e705c121SKalle Valo { 284e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 285e705c121SKalle Valo struct page *page; 286e705c121SKalle Valo gfp_t gfp_mask = priority; 287e705c121SKalle Valo 288e705c121SKalle Valo if (trans_pcie->rx_page_order > 0) 289e705c121SKalle Valo gfp_mask |= __GFP_COMP; 290e705c121SKalle Valo 291e705c121SKalle Valo /* Alloc a new receive buffer */ 292e705c121SKalle Valo page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 293e705c121SKalle Valo if (!page) { 294e705c121SKalle Valo if (net_ratelimit()) 295e705c121SKalle Valo IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 296e705c121SKalle Valo trans_pcie->rx_page_order); 29778485054SSara Sharon /* 29878485054SSara Sharon * Issue an error if we don't have enough pre-allocated 29978485054SSara Sharon * buffers. 300e705c121SKalle Valo ` */ 30178485054SSara Sharon if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) 302e705c121SKalle Valo IWL_CRIT(trans, 30378485054SSara Sharon "Failed to alloc_pages\n"); 304e705c121SKalle Valo return NULL; 305e705c121SKalle Valo } 306e705c121SKalle Valo return page; 307e705c121SKalle Valo } 308e705c121SKalle Valo 309e705c121SKalle Valo /* 310e705c121SKalle Valo * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 311e705c121SKalle Valo * 312e705c121SKalle Valo * A used RBD is an Rx buffer that has been given to the stack. To use it again 313e705c121SKalle Valo * a page must be allocated and the RBD must point to the page. This function 314e705c121SKalle Valo * doesn't change the HW pointer but handles the list of pages that is used by 315e705c121SKalle Valo * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 316e705c121SKalle Valo * allocated buffers. 317e705c121SKalle Valo */ 31878485054SSara Sharon static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, 31978485054SSara Sharon struct iwl_rxq *rxq) 320e705c121SKalle Valo { 321e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 322e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb; 323e705c121SKalle Valo struct page *page; 324e705c121SKalle Valo 325e705c121SKalle Valo while (1) { 326e705c121SKalle Valo spin_lock(&rxq->lock); 327e705c121SKalle Valo if (list_empty(&rxq->rx_used)) { 328e705c121SKalle Valo spin_unlock(&rxq->lock); 329e705c121SKalle Valo return; 330e705c121SKalle Valo } 331e705c121SKalle Valo spin_unlock(&rxq->lock); 332e705c121SKalle Valo 333e705c121SKalle Valo /* Alloc a new receive buffer */ 334e705c121SKalle Valo page = iwl_pcie_rx_alloc_page(trans, priority); 335e705c121SKalle Valo if (!page) 336e705c121SKalle Valo return; 337e705c121SKalle Valo 338e705c121SKalle Valo spin_lock(&rxq->lock); 339e705c121SKalle Valo 340e705c121SKalle Valo if (list_empty(&rxq->rx_used)) { 341e705c121SKalle Valo spin_unlock(&rxq->lock); 342e705c121SKalle Valo __free_pages(page, trans_pcie->rx_page_order); 343e705c121SKalle Valo return; 344e705c121SKalle Valo } 345e705c121SKalle Valo rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, 346e705c121SKalle Valo list); 347e705c121SKalle Valo list_del(&rxb->list); 348e705c121SKalle Valo spin_unlock(&rxq->lock); 349e705c121SKalle Valo 350e705c121SKalle Valo BUG_ON(rxb->page); 351e705c121SKalle Valo rxb->page = page; 352e705c121SKalle Valo /* Get physical address of the RB */ 353e705c121SKalle Valo rxb->page_dma = 354e705c121SKalle Valo dma_map_page(trans->dev, page, 0, 355e705c121SKalle Valo PAGE_SIZE << trans_pcie->rx_page_order, 356e705c121SKalle Valo DMA_FROM_DEVICE); 357e705c121SKalle Valo if (dma_mapping_error(trans->dev, rxb->page_dma)) { 358e705c121SKalle Valo rxb->page = NULL; 359e705c121SKalle Valo spin_lock(&rxq->lock); 360e705c121SKalle Valo list_add(&rxb->list, &rxq->rx_used); 361e705c121SKalle Valo spin_unlock(&rxq->lock); 362e705c121SKalle Valo __free_pages(page, trans_pcie->rx_page_order); 363e705c121SKalle Valo return; 364e705c121SKalle Valo } 365e705c121SKalle Valo /* dma address must be no more than 36 bits */ 366e705c121SKalle Valo BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 367e705c121SKalle Valo /* and also 256 byte aligned! */ 368e705c121SKalle Valo BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); 369e705c121SKalle Valo 370e705c121SKalle Valo spin_lock(&rxq->lock); 371e705c121SKalle Valo 372e705c121SKalle Valo list_add_tail(&rxb->list, &rxq->rx_free); 373e705c121SKalle Valo rxq->free_count++; 374e705c121SKalle Valo 375e705c121SKalle Valo spin_unlock(&rxq->lock); 376e705c121SKalle Valo } 377e705c121SKalle Valo } 378e705c121SKalle Valo 37978485054SSara Sharon static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) 380e705c121SKalle Valo { 381e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 382e705c121SKalle Valo int i; 383e705c121SKalle Valo 384e705c121SKalle Valo for (i = 0; i < RX_QUEUE_SIZE; i++) { 38578485054SSara Sharon if (!trans_pcie->rx_pool[i].page) 386e705c121SKalle Valo continue; 38778485054SSara Sharon dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, 388e705c121SKalle Valo PAGE_SIZE << trans_pcie->rx_page_order, 389e705c121SKalle Valo DMA_FROM_DEVICE); 39078485054SSara Sharon __free_pages(trans_pcie->rx_pool[i].page, 39178485054SSara Sharon trans_pcie->rx_page_order); 39278485054SSara Sharon trans_pcie->rx_pool[i].page = NULL; 393e705c121SKalle Valo } 394e705c121SKalle Valo } 395e705c121SKalle Valo 396e705c121SKalle Valo /* 397e705c121SKalle Valo * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues 398e705c121SKalle Valo * 399e705c121SKalle Valo * Allocates for each received request 8 pages 400e705c121SKalle Valo * Called as a scheduled work item. 401e705c121SKalle Valo */ 402e705c121SKalle Valo static void iwl_pcie_rx_allocator(struct iwl_trans *trans) 403e705c121SKalle Valo { 404e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 405e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 406e705c121SKalle Valo struct list_head local_empty; 407e705c121SKalle Valo int pending = atomic_xchg(&rba->req_pending, 0); 408e705c121SKalle Valo 409e705c121SKalle Valo IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); 410e705c121SKalle Valo 411e705c121SKalle Valo /* If we were scheduled - there is at least one request */ 412e705c121SKalle Valo spin_lock(&rba->lock); 413e705c121SKalle Valo /* swap out the rba->rbd_empty to a local list */ 414e705c121SKalle Valo list_replace_init(&rba->rbd_empty, &local_empty); 415e705c121SKalle Valo spin_unlock(&rba->lock); 416e705c121SKalle Valo 417e705c121SKalle Valo while (pending) { 418e705c121SKalle Valo int i; 419e705c121SKalle Valo struct list_head local_allocated; 42078485054SSara Sharon gfp_t gfp_mask = GFP_KERNEL; 42178485054SSara Sharon 42278485054SSara Sharon /* Do not post a warning if there are only a few requests */ 42378485054SSara Sharon if (pending < RX_PENDING_WATERMARK) 42478485054SSara Sharon gfp_mask |= __GFP_NOWARN; 425e705c121SKalle Valo 426e705c121SKalle Valo INIT_LIST_HEAD(&local_allocated); 427e705c121SKalle Valo 428e705c121SKalle Valo for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { 429e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb; 430e705c121SKalle Valo struct page *page; 431e705c121SKalle Valo 432e705c121SKalle Valo /* List should never be empty - each reused RBD is 433e705c121SKalle Valo * returned to the list, and initial pool covers any 434e705c121SKalle Valo * possible gap between the time the page is allocated 435e705c121SKalle Valo * to the time the RBD is added. 436e705c121SKalle Valo */ 437e705c121SKalle Valo BUG_ON(list_empty(&local_empty)); 438e705c121SKalle Valo /* Get the first rxb from the rbd list */ 439e705c121SKalle Valo rxb = list_first_entry(&local_empty, 440e705c121SKalle Valo struct iwl_rx_mem_buffer, list); 441e705c121SKalle Valo BUG_ON(rxb->page); 442e705c121SKalle Valo 443e705c121SKalle Valo /* Alloc a new receive buffer */ 44478485054SSara Sharon page = iwl_pcie_rx_alloc_page(trans, gfp_mask); 445e705c121SKalle Valo if (!page) 446e705c121SKalle Valo continue; 447e705c121SKalle Valo rxb->page = page; 448e705c121SKalle Valo 449e705c121SKalle Valo /* Get physical address of the RB */ 450e705c121SKalle Valo rxb->page_dma = dma_map_page(trans->dev, page, 0, 451e705c121SKalle Valo PAGE_SIZE << trans_pcie->rx_page_order, 452e705c121SKalle Valo DMA_FROM_DEVICE); 453e705c121SKalle Valo if (dma_mapping_error(trans->dev, rxb->page_dma)) { 454e705c121SKalle Valo rxb->page = NULL; 455e705c121SKalle Valo __free_pages(page, trans_pcie->rx_page_order); 456e705c121SKalle Valo continue; 457e705c121SKalle Valo } 458e705c121SKalle Valo /* dma address must be no more than 36 bits */ 459e705c121SKalle Valo BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 460e705c121SKalle Valo /* and also 256 byte aligned! */ 461e705c121SKalle Valo BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); 462e705c121SKalle Valo 463e705c121SKalle Valo /* move the allocated entry to the out list */ 464e705c121SKalle Valo list_move(&rxb->list, &local_allocated); 465e705c121SKalle Valo i++; 466e705c121SKalle Valo } 467e705c121SKalle Valo 468e705c121SKalle Valo pending--; 469e705c121SKalle Valo if (!pending) { 470e705c121SKalle Valo pending = atomic_xchg(&rba->req_pending, 0); 471e705c121SKalle Valo IWL_DEBUG_RX(trans, 472e705c121SKalle Valo "Pending allocation requests = %d\n", 473e705c121SKalle Valo pending); 474e705c121SKalle Valo } 475e705c121SKalle Valo 476e705c121SKalle Valo spin_lock(&rba->lock); 477e705c121SKalle Valo /* add the allocated rbds to the allocator allocated list */ 478e705c121SKalle Valo list_splice_tail(&local_allocated, &rba->rbd_allocated); 479e705c121SKalle Valo /* get more empty RBDs for current pending requests */ 480e705c121SKalle Valo list_splice_tail_init(&rba->rbd_empty, &local_empty); 481e705c121SKalle Valo spin_unlock(&rba->lock); 482e705c121SKalle Valo 483e705c121SKalle Valo atomic_inc(&rba->req_ready); 484e705c121SKalle Valo } 485e705c121SKalle Valo 486e705c121SKalle Valo spin_lock(&rba->lock); 487e705c121SKalle Valo /* return unused rbds to the allocator empty list */ 488e705c121SKalle Valo list_splice_tail(&local_empty, &rba->rbd_empty); 489e705c121SKalle Valo spin_unlock(&rba->lock); 490e705c121SKalle Valo } 491e705c121SKalle Valo 492e705c121SKalle Valo /* 493e705c121SKalle Valo * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages 494e705c121SKalle Valo .* 495e705c121SKalle Valo .* Called by queue when the queue posted allocation request and 496e705c121SKalle Valo * has freed 8 RBDs in order to restock itself. 497e705c121SKalle Valo */ 498e705c121SKalle Valo static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, 499e705c121SKalle Valo struct iwl_rx_mem_buffer 500e705c121SKalle Valo *out[RX_CLAIM_REQ_ALLOC]) 501e705c121SKalle Valo { 502e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 503e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 504e705c121SKalle Valo int i; 505e705c121SKalle Valo 506e705c121SKalle Valo /* 507e705c121SKalle Valo * atomic_dec_if_positive returns req_ready - 1 for any scenario. 508e705c121SKalle Valo * If req_ready is 0 atomic_dec_if_positive will return -1 and this 509e705c121SKalle Valo * function will return -ENOMEM, as there are no ready requests. 510e705c121SKalle Valo * atomic_dec_if_positive will perofrm the *actual* decrement only if 511e705c121SKalle Valo * req_ready > 0, i.e. - there are ready requests and the function 512e705c121SKalle Valo * hands one request to the caller. 513e705c121SKalle Valo */ 514e705c121SKalle Valo if (atomic_dec_if_positive(&rba->req_ready) < 0) 515e705c121SKalle Valo return -ENOMEM; 516e705c121SKalle Valo 517e705c121SKalle Valo spin_lock(&rba->lock); 518e705c121SKalle Valo for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { 519e705c121SKalle Valo /* Get next free Rx buffer, remove it from free list */ 520e705c121SKalle Valo out[i] = list_first_entry(&rba->rbd_allocated, 521e705c121SKalle Valo struct iwl_rx_mem_buffer, list); 522e705c121SKalle Valo list_del(&out[i]->list); 523e705c121SKalle Valo } 524e705c121SKalle Valo spin_unlock(&rba->lock); 525e705c121SKalle Valo 526e705c121SKalle Valo return 0; 527e705c121SKalle Valo } 528e705c121SKalle Valo 529e705c121SKalle Valo static void iwl_pcie_rx_allocator_work(struct work_struct *data) 530e705c121SKalle Valo { 531e705c121SKalle Valo struct iwl_rb_allocator *rba_p = 532e705c121SKalle Valo container_of(data, struct iwl_rb_allocator, rx_alloc); 533e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = 534e705c121SKalle Valo container_of(rba_p, struct iwl_trans_pcie, rba); 535e705c121SKalle Valo 536e705c121SKalle Valo iwl_pcie_rx_allocator(trans_pcie->trans); 537e705c121SKalle Valo } 538e705c121SKalle Valo 539e705c121SKalle Valo static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 540e705c121SKalle Valo { 541e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 542e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 543e705c121SKalle Valo struct device *dev = trans->dev; 54478485054SSara Sharon int i; 545e705c121SKalle Valo 54678485054SSara Sharon if (WARN_ON(trans_pcie->rxq)) 547e705c121SKalle Valo return -EINVAL; 548e705c121SKalle Valo 54978485054SSara Sharon trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), 55078485054SSara Sharon GFP_KERNEL); 55178485054SSara Sharon if (!trans_pcie->rxq) 55278485054SSara Sharon return -EINVAL; 55378485054SSara Sharon 55478485054SSara Sharon spin_lock_init(&rba->lock); 55578485054SSara Sharon 55678485054SSara Sharon for (i = 0; i < trans->num_rx_queues; i++) { 55778485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 55878485054SSara Sharon 55978485054SSara Sharon spin_lock_init(&rxq->lock); 56078485054SSara Sharon /* 56178485054SSara Sharon * Allocate the circular buffer of Read Buffer Descriptors 56278485054SSara Sharon * (RBDs) 56378485054SSara Sharon */ 56478485054SSara Sharon rxq->bd = dma_zalloc_coherent(dev, 56578485054SSara Sharon sizeof(__le32) * RX_QUEUE_SIZE, 566e705c121SKalle Valo &rxq->bd_dma, GFP_KERNEL); 567e705c121SKalle Valo if (!rxq->bd) 56878485054SSara Sharon goto err; 56978485054SSara Sharon 570e705c121SKalle Valo 571e705c121SKalle Valo /*Allocate the driver's pointer to receive buffer status */ 572e705c121SKalle Valo rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), 57378485054SSara Sharon &rxq->rb_stts_dma, 57478485054SSara Sharon GFP_KERNEL); 575e705c121SKalle Valo if (!rxq->rb_stts) 57678485054SSara Sharon goto err; 57778485054SSara Sharon } 578e705c121SKalle Valo return 0; 579e705c121SKalle Valo 58078485054SSara Sharon err: 58178485054SSara Sharon for (i = 0; i < trans->num_rx_queues; i++) { 58278485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 58378485054SSara Sharon 58478485054SSara Sharon if (rxq->bd) 585e705c121SKalle Valo dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, 586e705c121SKalle Valo rxq->bd, rxq->bd_dma); 587e705c121SKalle Valo rxq->bd_dma = 0; 588e705c121SKalle Valo rxq->bd = NULL; 58978485054SSara Sharon 59078485054SSara Sharon if (rxq->rb_stts) 59178485054SSara Sharon dma_free_coherent(trans->dev, 59278485054SSara Sharon sizeof(struct iwl_rb_status), 59378485054SSara Sharon rxq->rb_stts, rxq->rb_stts_dma); 59478485054SSara Sharon } 59578485054SSara Sharon kfree(trans_pcie->rxq); 596e705c121SKalle Valo return -ENOMEM; 597e705c121SKalle Valo } 598e705c121SKalle Valo 599e705c121SKalle Valo static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) 600e705c121SKalle Valo { 601e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 602e705c121SKalle Valo u32 rb_size; 603e705c121SKalle Valo const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 604e705c121SKalle Valo 6056c4fbcbcSEmmanuel Grumbach switch (trans_pcie->rx_buf_size) { 6066c4fbcbcSEmmanuel Grumbach case IWL_AMSDU_4K: 607e705c121SKalle Valo rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 6086c4fbcbcSEmmanuel Grumbach break; 6096c4fbcbcSEmmanuel Grumbach case IWL_AMSDU_8K: 6106c4fbcbcSEmmanuel Grumbach rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 6116c4fbcbcSEmmanuel Grumbach break; 6126c4fbcbcSEmmanuel Grumbach case IWL_AMSDU_12K: 6136c4fbcbcSEmmanuel Grumbach rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; 6146c4fbcbcSEmmanuel Grumbach break; 6156c4fbcbcSEmmanuel Grumbach default: 6166c4fbcbcSEmmanuel Grumbach WARN_ON(1); 6176c4fbcbcSEmmanuel Grumbach rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 6186c4fbcbcSEmmanuel Grumbach } 619e705c121SKalle Valo 620e705c121SKalle Valo /* Stop Rx DMA */ 621e705c121SKalle Valo iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 622e705c121SKalle Valo /* reset and flush pointers */ 623e705c121SKalle Valo iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); 624e705c121SKalle Valo iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); 625e705c121SKalle Valo iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0); 626e705c121SKalle Valo 627e705c121SKalle Valo /* Reset driver's Rx queue write index */ 628e705c121SKalle Valo iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 629e705c121SKalle Valo 630e705c121SKalle Valo /* Tell device where to find RBD circular buffer in DRAM */ 631e705c121SKalle Valo iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 632e705c121SKalle Valo (u32)(rxq->bd_dma >> 8)); 633e705c121SKalle Valo 634e705c121SKalle Valo /* Tell device where in DRAM to update its Rx status */ 635e705c121SKalle Valo iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, 636e705c121SKalle Valo rxq->rb_stts_dma >> 4); 637e705c121SKalle Valo 638e705c121SKalle Valo /* Enable Rx DMA 639e705c121SKalle Valo * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in 640e705c121SKalle Valo * the credit mechanism in 5000 HW RX FIFO 641e705c121SKalle Valo * Direct rx interrupts to hosts 6426c4fbcbcSEmmanuel Grumbach * Rx buffer size 4 or 8k or 12k 643e705c121SKalle Valo * RB timeout 0x10 644e705c121SKalle Valo * 256 RBDs 645e705c121SKalle Valo */ 646e705c121SKalle Valo iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 647e705c121SKalle Valo FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 648e705c121SKalle Valo FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 649e705c121SKalle Valo FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 650e705c121SKalle Valo rb_size| 651e705c121SKalle Valo (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| 652e705c121SKalle Valo (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 653e705c121SKalle Valo 654e705c121SKalle Valo /* Set interrupt coalescing timer to default (2048 usecs) */ 655e705c121SKalle Valo iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 656e705c121SKalle Valo 657e705c121SKalle Valo /* W/A for interrupt coalescing bug in 7260 and 3160 */ 658e705c121SKalle Valo if (trans->cfg->host_interrupt_operation_mode) 659e705c121SKalle Valo iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); 660e705c121SKalle Valo } 661e705c121SKalle Valo 662e705c121SKalle Valo static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 663e705c121SKalle Valo { 664e705c121SKalle Valo lockdep_assert_held(&rxq->lock); 665e705c121SKalle Valo 666e705c121SKalle Valo INIT_LIST_HEAD(&rxq->rx_free); 667e705c121SKalle Valo INIT_LIST_HEAD(&rxq->rx_used); 668e705c121SKalle Valo rxq->free_count = 0; 669e705c121SKalle Valo rxq->used_count = 0; 670e705c121SKalle Valo } 671e705c121SKalle Valo 672e705c121SKalle Valo static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) 673e705c121SKalle Valo { 674e705c121SKalle Valo int i; 675e705c121SKalle Valo 676e705c121SKalle Valo lockdep_assert_held(&rba->lock); 677e705c121SKalle Valo 678e705c121SKalle Valo INIT_LIST_HEAD(&rba->rbd_allocated); 679e705c121SKalle Valo INIT_LIST_HEAD(&rba->rbd_empty); 680e705c121SKalle Valo 681e705c121SKalle Valo for (i = 0; i < RX_POOL_SIZE; i++) 682e705c121SKalle Valo list_add(&rba->pool[i].list, &rba->rbd_empty); 683e705c121SKalle Valo } 684e705c121SKalle Valo 685e705c121SKalle Valo static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) 686e705c121SKalle Valo { 687e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 688e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 689e705c121SKalle Valo int i; 690e705c121SKalle Valo 691e705c121SKalle Valo lockdep_assert_held(&rba->lock); 692e705c121SKalle Valo 693e705c121SKalle Valo for (i = 0; i < RX_POOL_SIZE; i++) { 694e705c121SKalle Valo if (!rba->pool[i].page) 695e705c121SKalle Valo continue; 696e705c121SKalle Valo dma_unmap_page(trans->dev, rba->pool[i].page_dma, 697e705c121SKalle Valo PAGE_SIZE << trans_pcie->rx_page_order, 698e705c121SKalle Valo DMA_FROM_DEVICE); 699e705c121SKalle Valo __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); 700e705c121SKalle Valo rba->pool[i].page = NULL; 701e705c121SKalle Valo } 702e705c121SKalle Valo } 703e705c121SKalle Valo 704e705c121SKalle Valo int iwl_pcie_rx_init(struct iwl_trans *trans) 705e705c121SKalle Valo { 706e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 70778485054SSara Sharon struct iwl_rxq *def_rxq; 708e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 709e705c121SKalle Valo int i, err; 710e705c121SKalle Valo 71178485054SSara Sharon if (!trans_pcie->rxq) { 712e705c121SKalle Valo err = iwl_pcie_rx_alloc(trans); 713e705c121SKalle Valo if (err) 714e705c121SKalle Valo return err; 715e705c121SKalle Valo } 71678485054SSara Sharon def_rxq = trans_pcie->rxq; 717e705c121SKalle Valo if (!rba->alloc_wq) 718e705c121SKalle Valo rba->alloc_wq = alloc_workqueue("rb_allocator", 719e705c121SKalle Valo WQ_HIGHPRI | WQ_UNBOUND, 1); 720e705c121SKalle Valo INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); 721e705c121SKalle Valo 722e705c121SKalle Valo spin_lock(&rba->lock); 723e705c121SKalle Valo atomic_set(&rba->req_pending, 0); 724e705c121SKalle Valo atomic_set(&rba->req_ready, 0); 725e705c121SKalle Valo /* free all first - we might be reconfigured for a different size */ 726e705c121SKalle Valo iwl_pcie_rx_free_rba(trans); 727e705c121SKalle Valo iwl_pcie_rx_init_rba(rba); 728e705c121SKalle Valo spin_unlock(&rba->lock); 729e705c121SKalle Valo 730e705c121SKalle Valo /* free all first - we might be reconfigured for a different size */ 73178485054SSara Sharon iwl_pcie_free_rbs_pool(trans); 732e705c121SKalle Valo 733e705c121SKalle Valo for (i = 0; i < RX_QUEUE_SIZE; i++) 73478485054SSara Sharon def_rxq->queue[i] = NULL; 735e705c121SKalle Valo 73678485054SSara Sharon for (i = 0; i < trans->num_rx_queues; i++) { 73778485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 738e705c121SKalle Valo 739e705c121SKalle Valo spin_lock(&rxq->lock); 74078485054SSara Sharon /* 74178485054SSara Sharon * Set read write pointer to reflect that we have processed 74278485054SSara Sharon * and used all buffers, but have not restocked the Rx queue 74378485054SSara Sharon * with fresh buffers 74478485054SSara Sharon */ 74578485054SSara Sharon rxq->read = 0; 74678485054SSara Sharon rxq->write = 0; 74778485054SSara Sharon rxq->write_actual = 0; 74878485054SSara Sharon memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 74978485054SSara Sharon 75078485054SSara Sharon iwl_pcie_rx_init_rxb_lists(rxq); 75178485054SSara Sharon 752e705c121SKalle Valo spin_unlock(&rxq->lock); 75378485054SSara Sharon } 75478485054SSara Sharon 75578485054SSara Sharon /* move the entire pool to the default queue ownership */ 75678485054SSara Sharon for (i = 0; i < RX_QUEUE_SIZE; i++) 75778485054SSara Sharon list_add(&trans_pcie->rx_pool[i].list, &def_rxq->rx_used); 75878485054SSara Sharon 75978485054SSara Sharon iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); 76078485054SSara Sharon iwl_pcie_rxq_restock(trans, def_rxq); 76178485054SSara Sharon iwl_pcie_rx_hw_init(trans, def_rxq); 76278485054SSara Sharon 76378485054SSara Sharon spin_lock(&def_rxq->lock); 76478485054SSara Sharon iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); 76578485054SSara Sharon spin_unlock(&def_rxq->lock); 766e705c121SKalle Valo 767e705c121SKalle Valo return 0; 768e705c121SKalle Valo } 769e705c121SKalle Valo 770e705c121SKalle Valo void iwl_pcie_rx_free(struct iwl_trans *trans) 771e705c121SKalle Valo { 772e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 773e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 77478485054SSara Sharon int i; 775e705c121SKalle Valo 77678485054SSara Sharon /* 77778485054SSara Sharon * if rxq is NULL, it means that nothing has been allocated, 77878485054SSara Sharon * exit now 77978485054SSara Sharon */ 78078485054SSara Sharon if (!trans_pcie->rxq) { 781e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); 782e705c121SKalle Valo return; 783e705c121SKalle Valo } 784e705c121SKalle Valo 785e705c121SKalle Valo cancel_work_sync(&rba->rx_alloc); 786e705c121SKalle Valo if (rba->alloc_wq) { 787e705c121SKalle Valo destroy_workqueue(rba->alloc_wq); 788e705c121SKalle Valo rba->alloc_wq = NULL; 789e705c121SKalle Valo } 790e705c121SKalle Valo 791e705c121SKalle Valo spin_lock(&rba->lock); 792e705c121SKalle Valo iwl_pcie_rx_free_rba(trans); 793e705c121SKalle Valo spin_unlock(&rba->lock); 794e705c121SKalle Valo 79578485054SSara Sharon iwl_pcie_free_rbs_pool(trans); 796e705c121SKalle Valo 79778485054SSara Sharon for (i = 0; i < trans->num_rx_queues; i++) { 79878485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 79978485054SSara Sharon 80078485054SSara Sharon if (rxq->bd) 80178485054SSara Sharon dma_free_coherent(trans->dev, 80278485054SSara Sharon sizeof(__le32) * RX_QUEUE_SIZE, 803e705c121SKalle Valo rxq->bd, rxq->bd_dma); 804e705c121SKalle Valo rxq->bd_dma = 0; 805e705c121SKalle Valo rxq->bd = NULL; 806e705c121SKalle Valo 807e705c121SKalle Valo if (rxq->rb_stts) 808e705c121SKalle Valo dma_free_coherent(trans->dev, 809e705c121SKalle Valo sizeof(struct iwl_rb_status), 810e705c121SKalle Valo rxq->rb_stts, rxq->rb_stts_dma); 811e705c121SKalle Valo else 81278485054SSara Sharon IWL_DEBUG_INFO(trans, 81378485054SSara Sharon "Free rxq->rb_stts which is NULL\n"); 81478485054SSara Sharon } 81578485054SSara Sharon 81678485054SSara Sharon kfree(trans_pcie->rxq); 817e705c121SKalle Valo } 818e705c121SKalle Valo 819e705c121SKalle Valo /* 820e705c121SKalle Valo * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 821e705c121SKalle Valo * 822e705c121SKalle Valo * Called when a RBD can be reused. The RBD is transferred to the allocator. 823e705c121SKalle Valo * When there are 2 empty RBDs - a request for allocation is posted 824e705c121SKalle Valo */ 825e705c121SKalle Valo static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, 826e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb, 827e705c121SKalle Valo struct iwl_rxq *rxq, bool emergency) 828e705c121SKalle Valo { 829e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 830e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 831e705c121SKalle Valo 832e705c121SKalle Valo /* Move the RBD to the used list, will be moved to allocator in batches 833e705c121SKalle Valo * before claiming or posting a request*/ 834e705c121SKalle Valo list_add_tail(&rxb->list, &rxq->rx_used); 835e705c121SKalle Valo 836e705c121SKalle Valo if (unlikely(emergency)) 837e705c121SKalle Valo return; 838e705c121SKalle Valo 839e705c121SKalle Valo /* Count the allocator owned RBDs */ 840e705c121SKalle Valo rxq->used_count++; 841e705c121SKalle Valo 842e705c121SKalle Valo /* If we have RX_POST_REQ_ALLOC new released rx buffers - 843e705c121SKalle Valo * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is 844e705c121SKalle Valo * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, 845e705c121SKalle Valo * after but we still need to post another request. 846e705c121SKalle Valo */ 847e705c121SKalle Valo if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { 848e705c121SKalle Valo /* Move the 2 RBDs to the allocator ownership. 849e705c121SKalle Valo Allocator has another 6 from pool for the request completion*/ 850e705c121SKalle Valo spin_lock(&rba->lock); 851e705c121SKalle Valo list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 852e705c121SKalle Valo spin_unlock(&rba->lock); 853e705c121SKalle Valo 854e705c121SKalle Valo atomic_inc(&rba->req_pending); 855e705c121SKalle Valo queue_work(rba->alloc_wq, &rba->rx_alloc); 856e705c121SKalle Valo } 857e705c121SKalle Valo } 858e705c121SKalle Valo 859e705c121SKalle Valo static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 86078485054SSara Sharon struct iwl_rxq *rxq, 861e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb, 862e705c121SKalle Valo bool emergency) 863e705c121SKalle Valo { 864e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 865e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 866e705c121SKalle Valo bool page_stolen = false; 867e705c121SKalle Valo int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 868e705c121SKalle Valo u32 offset = 0; 869e705c121SKalle Valo 870e705c121SKalle Valo if (WARN_ON(!rxb)) 871e705c121SKalle Valo return; 872e705c121SKalle Valo 873e705c121SKalle Valo dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); 874e705c121SKalle Valo 875e705c121SKalle Valo while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { 876e705c121SKalle Valo struct iwl_rx_packet *pkt; 877e705c121SKalle Valo u16 sequence; 878e705c121SKalle Valo bool reclaim; 879e705c121SKalle Valo int index, cmd_index, len; 880e705c121SKalle Valo struct iwl_rx_cmd_buffer rxcb = { 881e705c121SKalle Valo ._offset = offset, 882e705c121SKalle Valo ._rx_page_order = trans_pcie->rx_page_order, 883e705c121SKalle Valo ._page = rxb->page, 884e705c121SKalle Valo ._page_stolen = false, 885e705c121SKalle Valo .truesize = max_len, 886e705c121SKalle Valo }; 887e705c121SKalle Valo 888e705c121SKalle Valo pkt = rxb_addr(&rxcb); 889e705c121SKalle Valo 890e705c121SKalle Valo if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) 891e705c121SKalle Valo break; 892e705c121SKalle Valo 893e705c121SKalle Valo IWL_DEBUG_RX(trans, 894e705c121SKalle Valo "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", 895e705c121SKalle Valo rxcb._offset, 89639bdb17eSSharon Dvir iwl_get_cmd_string(trans, 89739bdb17eSSharon Dvir iwl_cmd_id(pkt->hdr.cmd, 89839bdb17eSSharon Dvir pkt->hdr.group_id, 89939bdb17eSSharon Dvir 0)), 900e705c121SKalle Valo pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); 901e705c121SKalle Valo 902e705c121SKalle Valo len = iwl_rx_packet_len(pkt); 903e705c121SKalle Valo len += sizeof(u32); /* account for status word */ 904e705c121SKalle Valo trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 905e705c121SKalle Valo trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 906e705c121SKalle Valo 907e705c121SKalle Valo /* Reclaim a command buffer only if this packet is a response 908e705c121SKalle Valo * to a (driver-originated) command. 909e705c121SKalle Valo * If the packet (e.g. Rx frame) originated from uCode, 910e705c121SKalle Valo * there is no command buffer to reclaim. 911e705c121SKalle Valo * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 912e705c121SKalle Valo * but apparently a few don't get set; catch them here. */ 913e705c121SKalle Valo reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 914e705c121SKalle Valo if (reclaim) { 915e705c121SKalle Valo int i; 916e705c121SKalle Valo 917e705c121SKalle Valo for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 918e705c121SKalle Valo if (trans_pcie->no_reclaim_cmds[i] == 919e705c121SKalle Valo pkt->hdr.cmd) { 920e705c121SKalle Valo reclaim = false; 921e705c121SKalle Valo break; 922e705c121SKalle Valo } 923e705c121SKalle Valo } 924e705c121SKalle Valo } 925e705c121SKalle Valo 926e705c121SKalle Valo sequence = le16_to_cpu(pkt->hdr.sequence); 927e705c121SKalle Valo index = SEQ_TO_INDEX(sequence); 928e705c121SKalle Valo cmd_index = get_cmd_index(&txq->q, index); 929e705c121SKalle Valo 930e705c121SKalle Valo iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); 931e705c121SKalle Valo 932e705c121SKalle Valo if (reclaim) { 933e705c121SKalle Valo kzfree(txq->entries[cmd_index].free_buf); 934e705c121SKalle Valo txq->entries[cmd_index].free_buf = NULL; 935e705c121SKalle Valo } 936e705c121SKalle Valo 937e705c121SKalle Valo /* 938e705c121SKalle Valo * After here, we should always check rxcb._page_stolen, 939e705c121SKalle Valo * if it is true then one of the handlers took the page. 940e705c121SKalle Valo */ 941e705c121SKalle Valo 942e705c121SKalle Valo if (reclaim) { 943e705c121SKalle Valo /* Invoke any callbacks, transfer the buffer to caller, 944e705c121SKalle Valo * and fire off the (possibly) blocking 945e705c121SKalle Valo * iwl_trans_send_cmd() 946e705c121SKalle Valo * as we reclaim the driver command queue */ 947e705c121SKalle Valo if (!rxcb._page_stolen) 948e705c121SKalle Valo iwl_pcie_hcmd_complete(trans, &rxcb); 949e705c121SKalle Valo else 950e705c121SKalle Valo IWL_WARN(trans, "Claim null rxb?\n"); 951e705c121SKalle Valo } 952e705c121SKalle Valo 953e705c121SKalle Valo page_stolen |= rxcb._page_stolen; 954e705c121SKalle Valo offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 955e705c121SKalle Valo } 956e705c121SKalle Valo 957e705c121SKalle Valo /* page was stolen from us -- free our reference */ 958e705c121SKalle Valo if (page_stolen) { 959e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 960e705c121SKalle Valo rxb->page = NULL; 961e705c121SKalle Valo } 962e705c121SKalle Valo 963e705c121SKalle Valo /* Reuse the page if possible. For notification packets and 964e705c121SKalle Valo * SKBs that fail to Rx correctly, add them back into the 965e705c121SKalle Valo * rx_free list for reuse later. */ 966e705c121SKalle Valo if (rxb->page != NULL) { 967e705c121SKalle Valo rxb->page_dma = 968e705c121SKalle Valo dma_map_page(trans->dev, rxb->page, 0, 969e705c121SKalle Valo PAGE_SIZE << trans_pcie->rx_page_order, 970e705c121SKalle Valo DMA_FROM_DEVICE); 971e705c121SKalle Valo if (dma_mapping_error(trans->dev, rxb->page_dma)) { 972e705c121SKalle Valo /* 973e705c121SKalle Valo * free the page(s) as well to not break 974e705c121SKalle Valo * the invariant that the items on the used 975e705c121SKalle Valo * list have no page(s) 976e705c121SKalle Valo */ 977e705c121SKalle Valo __free_pages(rxb->page, trans_pcie->rx_page_order); 978e705c121SKalle Valo rxb->page = NULL; 979e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 980e705c121SKalle Valo } else { 981e705c121SKalle Valo list_add_tail(&rxb->list, &rxq->rx_free); 982e705c121SKalle Valo rxq->free_count++; 983e705c121SKalle Valo } 984e705c121SKalle Valo } else 985e705c121SKalle Valo iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 986e705c121SKalle Valo } 987e705c121SKalle Valo 988e705c121SKalle Valo /* 989e705c121SKalle Valo * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 990e705c121SKalle Valo */ 991e705c121SKalle Valo static void iwl_pcie_rx_handle(struct iwl_trans *trans) 992e705c121SKalle Valo { 993e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 99478485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 995e705c121SKalle Valo u32 r, i, j, count = 0; 996e705c121SKalle Valo bool emergency = false; 997e705c121SKalle Valo 998e705c121SKalle Valo restart: 999e705c121SKalle Valo spin_lock(&rxq->lock); 1000e705c121SKalle Valo /* uCode's read index (stored in shared DRAM) indicates the last Rx 1001e705c121SKalle Valo * buffer that the driver may process (last buffer filled by ucode). */ 1002e705c121SKalle Valo r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 1003e705c121SKalle Valo i = rxq->read; 1004e705c121SKalle Valo 1005e705c121SKalle Valo /* Rx interrupt, but nothing sent from uCode */ 1006e705c121SKalle Valo if (i == r) 1007e705c121SKalle Valo IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); 1008e705c121SKalle Valo 1009e705c121SKalle Valo while (i != r) { 1010e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb; 1011e705c121SKalle Valo 1012e705c121SKalle Valo if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) 1013e705c121SKalle Valo emergency = true; 1014e705c121SKalle Valo 1015e705c121SKalle Valo rxb = rxq->queue[i]; 1016e705c121SKalle Valo rxq->queue[i] = NULL; 1017e705c121SKalle Valo 1018f02d2ccdSJohannes Berg IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); 101978485054SSara Sharon iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); 1020e705c121SKalle Valo 1021e705c121SKalle Valo i = (i + 1) & RX_QUEUE_MASK; 1022e705c121SKalle Valo 1023e705c121SKalle Valo /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1024e705c121SKalle Valo * try to claim the pre-allocated buffers from the allocator */ 1025e705c121SKalle Valo if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { 1026e705c121SKalle Valo struct iwl_rb_allocator *rba = &trans_pcie->rba; 1027e705c121SKalle Valo struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; 1028e705c121SKalle Valo 1029e705c121SKalle Valo if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && 1030e705c121SKalle Valo !emergency) { 1031e705c121SKalle Valo /* Add the remaining 6 empty RBDs 1032e705c121SKalle Valo * for allocator use 1033e705c121SKalle Valo */ 1034e705c121SKalle Valo spin_lock(&rba->lock); 1035e705c121SKalle Valo list_splice_tail_init(&rxq->rx_used, 1036e705c121SKalle Valo &rba->rbd_empty); 1037e705c121SKalle Valo spin_unlock(&rba->lock); 1038e705c121SKalle Valo } 1039e705c121SKalle Valo 1040e705c121SKalle Valo /* If not ready - continue, will try to reclaim later. 1041e705c121SKalle Valo * No need to reschedule work - allocator exits only on 1042e705c121SKalle Valo * success */ 1043e705c121SKalle Valo if (!iwl_pcie_rx_allocator_get(trans, out)) { 1044e705c121SKalle Valo /* If success - then RX_CLAIM_REQ_ALLOC 1045e705c121SKalle Valo * buffers were retrieved and should be added 1046e705c121SKalle Valo * to free list */ 1047e705c121SKalle Valo rxq->used_count -= RX_CLAIM_REQ_ALLOC; 1048e705c121SKalle Valo for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { 1049e705c121SKalle Valo list_add_tail(&out[j]->list, 1050e705c121SKalle Valo &rxq->rx_free); 1051e705c121SKalle Valo rxq->free_count++; 1052e705c121SKalle Valo } 1053e705c121SKalle Valo } 1054e705c121SKalle Valo } 1055e705c121SKalle Valo if (emergency) { 1056e705c121SKalle Valo count++; 1057e705c121SKalle Valo if (count == 8) { 1058e705c121SKalle Valo count = 0; 1059e705c121SKalle Valo if (rxq->used_count < RX_QUEUE_SIZE / 3) 1060e705c121SKalle Valo emergency = false; 1061e705c121SKalle Valo spin_unlock(&rxq->lock); 106278485054SSara Sharon iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1063e705c121SKalle Valo spin_lock(&rxq->lock); 1064e705c121SKalle Valo } 1065e705c121SKalle Valo } 1066e705c121SKalle Valo /* handle restock for three cases, can be all of them at once: 1067e705c121SKalle Valo * - we just pulled buffers from the allocator 1068e705c121SKalle Valo * - we have 8+ unstolen pages accumulated 1069e705c121SKalle Valo * - we are in emergency and allocated buffers 1070e705c121SKalle Valo */ 1071e705c121SKalle Valo if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { 1072e705c121SKalle Valo rxq->read = i; 1073e705c121SKalle Valo spin_unlock(&rxq->lock); 107478485054SSara Sharon iwl_pcie_rxq_restock(trans, rxq); 1075e705c121SKalle Valo goto restart; 1076e705c121SKalle Valo } 1077e705c121SKalle Valo } 1078e705c121SKalle Valo 1079e705c121SKalle Valo /* Backtrack one entry */ 1080e705c121SKalle Valo rxq->read = i; 1081e705c121SKalle Valo spin_unlock(&rxq->lock); 1082e705c121SKalle Valo 1083e705c121SKalle Valo /* 1084e705c121SKalle Valo * handle a case where in emergency there are some unallocated RBDs. 1085e705c121SKalle Valo * those RBDs are in the used list, but are not tracked by the queue's 1086e705c121SKalle Valo * used_count which counts allocator owned RBDs. 1087e705c121SKalle Valo * unallocated emergency RBDs must be allocated on exit, otherwise 1088e705c121SKalle Valo * when called again the function may not be in emergency mode and 1089e705c121SKalle Valo * they will be handed to the allocator with no tracking in the RBD 1090e705c121SKalle Valo * allocator counters, which will lead to them never being claimed back 1091e705c121SKalle Valo * by the queue. 1092e705c121SKalle Valo * by allocating them here, they are now in the queue free list, and 1093e705c121SKalle Valo * will be restocked by the next call of iwl_pcie_rxq_restock. 1094e705c121SKalle Valo */ 1095e705c121SKalle Valo if (unlikely(emergency && count)) 109678485054SSara Sharon iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1097e705c121SKalle Valo 1098e705c121SKalle Valo if (trans_pcie->napi.poll) 1099e705c121SKalle Valo napi_gro_flush(&trans_pcie->napi, false); 1100e705c121SKalle Valo } 1101e705c121SKalle Valo 1102e705c121SKalle Valo /* 1103e705c121SKalle Valo * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1104e705c121SKalle Valo */ 1105e705c121SKalle Valo static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1106e705c121SKalle Valo { 1107e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1108e705c121SKalle Valo int i; 1109e705c121SKalle Valo 1110e705c121SKalle Valo /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1111e705c121SKalle Valo if (trans->cfg->internal_wimax_coex && 1112e705c121SKalle Valo !trans->cfg->apmg_not_supported && 1113e705c121SKalle Valo (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1114e705c121SKalle Valo APMS_CLK_VAL_MRB_FUNC_MODE) || 1115e705c121SKalle Valo (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1116e705c121SKalle Valo APMG_PS_CTRL_VAL_RESET_REQ))) { 1117e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1118e705c121SKalle Valo iwl_op_mode_wimax_active(trans->op_mode); 1119e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1120e705c121SKalle Valo return; 1121e705c121SKalle Valo } 1122e705c121SKalle Valo 1123e705c121SKalle Valo iwl_pcie_dump_csr(trans); 1124e705c121SKalle Valo iwl_dump_fh(trans, NULL); 1125e705c121SKalle Valo 1126e705c121SKalle Valo local_bh_disable(); 1127e705c121SKalle Valo /* The STATUS_FW_ERROR bit is set in this function. This must happen 1128e705c121SKalle Valo * before we wake up the command caller, to ensure a proper cleanup. */ 1129e705c121SKalle Valo iwl_trans_fw_error(trans); 1130e705c121SKalle Valo local_bh_enable(); 1131e705c121SKalle Valo 1132e705c121SKalle Valo for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) 1133e705c121SKalle Valo del_timer(&trans_pcie->txq[i].stuck_timer); 1134e705c121SKalle Valo 1135e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1136e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1137e705c121SKalle Valo } 1138e705c121SKalle Valo 1139e705c121SKalle Valo static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) 1140e705c121SKalle Valo { 1141e705c121SKalle Valo u32 inta; 1142e705c121SKalle Valo 1143e705c121SKalle Valo lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); 1144e705c121SKalle Valo 1145e705c121SKalle Valo trace_iwlwifi_dev_irq(trans->dev); 1146e705c121SKalle Valo 1147e705c121SKalle Valo /* Discover which interrupts are active/pending */ 1148e705c121SKalle Valo inta = iwl_read32(trans, CSR_INT); 1149e705c121SKalle Valo 1150e705c121SKalle Valo /* the thread will service interrupts and re-enable them */ 1151e705c121SKalle Valo return inta; 1152e705c121SKalle Valo } 1153e705c121SKalle Valo 1154e705c121SKalle Valo /* a device (PCI-E) page is 4096 bytes long */ 1155e705c121SKalle Valo #define ICT_SHIFT 12 1156e705c121SKalle Valo #define ICT_SIZE (1 << ICT_SHIFT) 1157e705c121SKalle Valo #define ICT_COUNT (ICT_SIZE / sizeof(u32)) 1158e705c121SKalle Valo 1159e705c121SKalle Valo /* interrupt handler using ict table, with this interrupt driver will 1160e705c121SKalle Valo * stop using INTA register to get device's interrupt, reading this register 1161e705c121SKalle Valo * is expensive, device will write interrupts in ICT dram table, increment 1162e705c121SKalle Valo * index then will fire interrupt to driver, driver will OR all ICT table 1163e705c121SKalle Valo * entries from current index up to table entry with 0 value. the result is 1164e705c121SKalle Valo * the interrupt we need to service, driver will set the entries back to 0 and 1165e705c121SKalle Valo * set index. 1166e705c121SKalle Valo */ 1167e705c121SKalle Valo static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) 1168e705c121SKalle Valo { 1169e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1170e705c121SKalle Valo u32 inta; 1171e705c121SKalle Valo u32 val = 0; 1172e705c121SKalle Valo u32 read; 1173e705c121SKalle Valo 1174e705c121SKalle Valo trace_iwlwifi_dev_irq(trans->dev); 1175e705c121SKalle Valo 1176e705c121SKalle Valo /* Ignore interrupt if there's nothing in NIC to service. 1177e705c121SKalle Valo * This may be due to IRQ shared with another device, 1178e705c121SKalle Valo * or due to sporadic interrupts thrown from our NIC. */ 1179e705c121SKalle Valo read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1180e705c121SKalle Valo trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); 1181e705c121SKalle Valo if (!read) 1182e705c121SKalle Valo return 0; 1183e705c121SKalle Valo 1184e705c121SKalle Valo /* 1185e705c121SKalle Valo * Collect all entries up to the first 0, starting from ict_index; 1186e705c121SKalle Valo * note we already read at ict_index. 1187e705c121SKalle Valo */ 1188e705c121SKalle Valo do { 1189e705c121SKalle Valo val |= read; 1190e705c121SKalle Valo IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", 1191e705c121SKalle Valo trans_pcie->ict_index, read); 1192e705c121SKalle Valo trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; 1193e705c121SKalle Valo trans_pcie->ict_index = 1194e705c121SKalle Valo ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); 1195e705c121SKalle Valo 1196e705c121SKalle Valo read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1197e705c121SKalle Valo trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, 1198e705c121SKalle Valo read); 1199e705c121SKalle Valo } while (read); 1200e705c121SKalle Valo 1201e705c121SKalle Valo /* We should not get this value, just ignore it. */ 1202e705c121SKalle Valo if (val == 0xffffffff) 1203e705c121SKalle Valo val = 0; 1204e705c121SKalle Valo 1205e705c121SKalle Valo /* 1206e705c121SKalle Valo * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit 1207e705c121SKalle Valo * (bit 15 before shifting it to 31) to clear when using interrupt 1208e705c121SKalle Valo * coalescing. fortunately, bits 18 and 19 stay set when this happens 1209e705c121SKalle Valo * so we use them to decide on the real state of the Rx bit. 1210e705c121SKalle Valo * In order words, bit 15 is set if bit 18 or bit 19 are set. 1211e705c121SKalle Valo */ 1212e705c121SKalle Valo if (val & 0xC0000) 1213e705c121SKalle Valo val |= 0x8000; 1214e705c121SKalle Valo 1215e705c121SKalle Valo inta = (0xff & val) | ((0xff00 & val) << 16); 1216e705c121SKalle Valo return inta; 1217e705c121SKalle Valo } 1218e705c121SKalle Valo 1219e705c121SKalle Valo irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 1220e705c121SKalle Valo { 1221e705c121SKalle Valo struct iwl_trans *trans = dev_id; 1222e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1223e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1224e705c121SKalle Valo u32 inta = 0; 1225e705c121SKalle Valo u32 handled = 0; 1226e705c121SKalle Valo 1227e705c121SKalle Valo lock_map_acquire(&trans->sync_cmd_lockdep_map); 1228e705c121SKalle Valo 1229e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 1230e705c121SKalle Valo 1231e705c121SKalle Valo /* dram interrupt table not set yet, 1232e705c121SKalle Valo * use legacy interrupt. 1233e705c121SKalle Valo */ 1234e705c121SKalle Valo if (likely(trans_pcie->use_ict)) 1235e705c121SKalle Valo inta = iwl_pcie_int_cause_ict(trans); 1236e705c121SKalle Valo else 1237e705c121SKalle Valo inta = iwl_pcie_int_cause_non_ict(trans); 1238e705c121SKalle Valo 1239e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) { 1240e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1241e705c121SKalle Valo "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", 1242e705c121SKalle Valo inta, trans_pcie->inta_mask, 1243e705c121SKalle Valo iwl_read32(trans, CSR_INT_MASK), 1244e705c121SKalle Valo iwl_read32(trans, CSR_FH_INT_STATUS)); 1245e705c121SKalle Valo if (inta & (~trans_pcie->inta_mask)) 1246e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1247e705c121SKalle Valo "We got a masked interrupt (0x%08x)\n", 1248e705c121SKalle Valo inta & (~trans_pcie->inta_mask)); 1249e705c121SKalle Valo } 1250e705c121SKalle Valo 1251e705c121SKalle Valo inta &= trans_pcie->inta_mask; 1252e705c121SKalle Valo 1253e705c121SKalle Valo /* 1254e705c121SKalle Valo * Ignore interrupt if there's nothing in NIC to service. 1255e705c121SKalle Valo * This may be due to IRQ shared with another device, 1256e705c121SKalle Valo * or due to sporadic interrupts thrown from our NIC. 1257e705c121SKalle Valo */ 1258e705c121SKalle Valo if (unlikely(!inta)) { 1259e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1260e705c121SKalle Valo /* 1261e705c121SKalle Valo * Re-enable interrupts here since we don't 1262e705c121SKalle Valo * have anything to service 1263e705c121SKalle Valo */ 1264e705c121SKalle Valo if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1265e705c121SKalle Valo iwl_enable_interrupts(trans); 1266e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1267e705c121SKalle Valo lock_map_release(&trans->sync_cmd_lockdep_map); 1268e705c121SKalle Valo return IRQ_NONE; 1269e705c121SKalle Valo } 1270e705c121SKalle Valo 1271e705c121SKalle Valo if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1272e705c121SKalle Valo /* 1273e705c121SKalle Valo * Hardware disappeared. It might have 1274e705c121SKalle Valo * already raised an interrupt. 1275e705c121SKalle Valo */ 1276e705c121SKalle Valo IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 1277e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1278e705c121SKalle Valo goto out; 1279e705c121SKalle Valo } 1280e705c121SKalle Valo 1281e705c121SKalle Valo /* Ack/clear/reset pending uCode interrupts. 1282e705c121SKalle Valo * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1283e705c121SKalle Valo */ 1284e705c121SKalle Valo /* There is a hardware bug in the interrupt mask function that some 1285e705c121SKalle Valo * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if 1286e705c121SKalle Valo * they are disabled in the CSR_INT_MASK register. Furthermore the 1287e705c121SKalle Valo * ICT interrupt handling mechanism has another bug that might cause 1288e705c121SKalle Valo * these unmasked interrupts fail to be detected. We workaround the 1289e705c121SKalle Valo * hardware bugs here by ACKing all the possible interrupts so that 1290e705c121SKalle Valo * interrupt coalescing can still be achieved. 1291e705c121SKalle Valo */ 1292e705c121SKalle Valo iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1293e705c121SKalle Valo 1294e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) 1295e705c121SKalle Valo IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1296e705c121SKalle Valo inta, iwl_read32(trans, CSR_INT_MASK)); 1297e705c121SKalle Valo 1298e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1299e705c121SKalle Valo 1300e705c121SKalle Valo /* Now service all interrupt bits discovered above. */ 1301e705c121SKalle Valo if (inta & CSR_INT_BIT_HW_ERR) { 1302e705c121SKalle Valo IWL_ERR(trans, "Hardware error detected. Restarting.\n"); 1303e705c121SKalle Valo 1304e705c121SKalle Valo /* Tell the device to stop sending interrupts */ 1305e705c121SKalle Valo iwl_disable_interrupts(trans); 1306e705c121SKalle Valo 1307e705c121SKalle Valo isr_stats->hw++; 1308e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1309e705c121SKalle Valo 1310e705c121SKalle Valo handled |= CSR_INT_BIT_HW_ERR; 1311e705c121SKalle Valo 1312e705c121SKalle Valo goto out; 1313e705c121SKalle Valo } 1314e705c121SKalle Valo 1315e705c121SKalle Valo if (iwl_have_debug_level(IWL_DL_ISR)) { 1316e705c121SKalle Valo /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1317e705c121SKalle Valo if (inta & CSR_INT_BIT_SCD) { 1318e705c121SKalle Valo IWL_DEBUG_ISR(trans, 1319e705c121SKalle Valo "Scheduler finished to transmit the frame/frames.\n"); 1320e705c121SKalle Valo isr_stats->sch++; 1321e705c121SKalle Valo } 1322e705c121SKalle Valo 1323e705c121SKalle Valo /* Alive notification via Rx interrupt will do the real work */ 1324e705c121SKalle Valo if (inta & CSR_INT_BIT_ALIVE) { 1325e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1326e705c121SKalle Valo isr_stats->alive++; 1327e705c121SKalle Valo } 1328e705c121SKalle Valo } 1329e705c121SKalle Valo 1330e705c121SKalle Valo /* Safely ignore these bits for debug checks below */ 1331e705c121SKalle Valo inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1332e705c121SKalle Valo 1333e705c121SKalle Valo /* HW RF KILL switch toggled */ 1334e705c121SKalle Valo if (inta & CSR_INT_BIT_RF_KILL) { 1335e705c121SKalle Valo bool hw_rfkill; 1336e705c121SKalle Valo 1337e705c121SKalle Valo hw_rfkill = iwl_is_rfkill_set(trans); 1338e705c121SKalle Valo IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 1339e705c121SKalle Valo hw_rfkill ? "disable radio" : "enable radio"); 1340e705c121SKalle Valo 1341e705c121SKalle Valo isr_stats->rfkill++; 1342e705c121SKalle Valo 1343e705c121SKalle Valo mutex_lock(&trans_pcie->mutex); 1344e705c121SKalle Valo iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1345e705c121SKalle Valo mutex_unlock(&trans_pcie->mutex); 1346e705c121SKalle Valo if (hw_rfkill) { 1347e705c121SKalle Valo set_bit(STATUS_RFKILL, &trans->status); 1348e705c121SKalle Valo if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 1349e705c121SKalle Valo &trans->status)) 1350e705c121SKalle Valo IWL_DEBUG_RF_KILL(trans, 1351e705c121SKalle Valo "Rfkill while SYNC HCMD in flight\n"); 1352e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1353e705c121SKalle Valo } else { 1354e705c121SKalle Valo clear_bit(STATUS_RFKILL, &trans->status); 1355e705c121SKalle Valo } 1356e705c121SKalle Valo 1357e705c121SKalle Valo handled |= CSR_INT_BIT_RF_KILL; 1358e705c121SKalle Valo } 1359e705c121SKalle Valo 1360e705c121SKalle Valo /* Chip got too hot and stopped itself */ 1361e705c121SKalle Valo if (inta & CSR_INT_BIT_CT_KILL) { 1362e705c121SKalle Valo IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1363e705c121SKalle Valo isr_stats->ctkill++; 1364e705c121SKalle Valo handled |= CSR_INT_BIT_CT_KILL; 1365e705c121SKalle Valo } 1366e705c121SKalle Valo 1367e705c121SKalle Valo /* Error detected by uCode */ 1368e705c121SKalle Valo if (inta & CSR_INT_BIT_SW_ERR) { 1369e705c121SKalle Valo IWL_ERR(trans, "Microcode SW error detected. " 1370e705c121SKalle Valo " Restarting 0x%X.\n", inta); 1371e705c121SKalle Valo isr_stats->sw++; 1372e705c121SKalle Valo iwl_pcie_irq_handle_error(trans); 1373e705c121SKalle Valo handled |= CSR_INT_BIT_SW_ERR; 1374e705c121SKalle Valo } 1375e705c121SKalle Valo 1376e705c121SKalle Valo /* uCode wakes up after power-down sleep */ 1377e705c121SKalle Valo if (inta & CSR_INT_BIT_WAKEUP) { 1378e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1379e705c121SKalle Valo iwl_pcie_rxq_check_wrptr(trans); 1380e705c121SKalle Valo iwl_pcie_txq_check_wrptrs(trans); 1381e705c121SKalle Valo 1382e705c121SKalle Valo isr_stats->wakeup++; 1383e705c121SKalle Valo 1384e705c121SKalle Valo handled |= CSR_INT_BIT_WAKEUP; 1385e705c121SKalle Valo } 1386e705c121SKalle Valo 1387e705c121SKalle Valo /* All uCode command responses, including Tx command responses, 1388e705c121SKalle Valo * Rx "responses" (frame-received notification), and other 1389e705c121SKalle Valo * notifications from uCode come through here*/ 1390e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1391e705c121SKalle Valo CSR_INT_BIT_RX_PERIODIC)) { 1392e705c121SKalle Valo IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1393e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1394e705c121SKalle Valo handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1395e705c121SKalle Valo iwl_write32(trans, CSR_FH_INT_STATUS, 1396e705c121SKalle Valo CSR_FH_INT_RX_MASK); 1397e705c121SKalle Valo } 1398e705c121SKalle Valo if (inta & CSR_INT_BIT_RX_PERIODIC) { 1399e705c121SKalle Valo handled |= CSR_INT_BIT_RX_PERIODIC; 1400e705c121SKalle Valo iwl_write32(trans, 1401e705c121SKalle Valo CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1402e705c121SKalle Valo } 1403e705c121SKalle Valo /* Sending RX interrupt require many steps to be done in the 1404e705c121SKalle Valo * the device: 1405e705c121SKalle Valo * 1- write interrupt to current index in ICT table. 1406e705c121SKalle Valo * 2- dma RX frame. 1407e705c121SKalle Valo * 3- update RX shared data to indicate last write index. 1408e705c121SKalle Valo * 4- send interrupt. 1409e705c121SKalle Valo * This could lead to RX race, driver could receive RX interrupt 1410e705c121SKalle Valo * but the shared data changes does not reflect this; 1411e705c121SKalle Valo * periodic interrupt will detect any dangling Rx activity. 1412e705c121SKalle Valo */ 1413e705c121SKalle Valo 1414e705c121SKalle Valo /* Disable periodic interrupt; we use it as just a one-shot. */ 1415e705c121SKalle Valo iwl_write8(trans, CSR_INT_PERIODIC_REG, 1416e705c121SKalle Valo CSR_INT_PERIODIC_DIS); 1417e705c121SKalle Valo 1418e705c121SKalle Valo /* 1419e705c121SKalle Valo * Enable periodic interrupt in 8 msec only if we received 1420e705c121SKalle Valo * real RX interrupt (instead of just periodic int), to catch 1421e705c121SKalle Valo * any dangling Rx interrupt. If it was just the periodic 1422e705c121SKalle Valo * interrupt, there was no dangling Rx activity, and no need 1423e705c121SKalle Valo * to extend the periodic interrupt; one-shot is enough. 1424e705c121SKalle Valo */ 1425e705c121SKalle Valo if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1426e705c121SKalle Valo iwl_write8(trans, CSR_INT_PERIODIC_REG, 1427e705c121SKalle Valo CSR_INT_PERIODIC_ENA); 1428e705c121SKalle Valo 1429e705c121SKalle Valo isr_stats->rx++; 1430e705c121SKalle Valo 1431e705c121SKalle Valo local_bh_disable(); 1432e705c121SKalle Valo iwl_pcie_rx_handle(trans); 1433e705c121SKalle Valo local_bh_enable(); 1434e705c121SKalle Valo } 1435e705c121SKalle Valo 1436e705c121SKalle Valo /* This "Tx" DMA channel is used only for loading uCode */ 1437e705c121SKalle Valo if (inta & CSR_INT_BIT_FH_TX) { 1438e705c121SKalle Valo iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 1439e705c121SKalle Valo IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 1440e705c121SKalle Valo isr_stats->tx++; 1441e705c121SKalle Valo handled |= CSR_INT_BIT_FH_TX; 1442e705c121SKalle Valo /* Wake up uCode load routine, now that load is complete */ 1443e705c121SKalle Valo trans_pcie->ucode_write_complete = true; 1444e705c121SKalle Valo wake_up(&trans_pcie->ucode_write_waitq); 1445e705c121SKalle Valo } 1446e705c121SKalle Valo 1447e705c121SKalle Valo if (inta & ~handled) { 1448e705c121SKalle Valo IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1449e705c121SKalle Valo isr_stats->unhandled++; 1450e705c121SKalle Valo } 1451e705c121SKalle Valo 1452e705c121SKalle Valo if (inta & ~(trans_pcie->inta_mask)) { 1453e705c121SKalle Valo IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 1454e705c121SKalle Valo inta & ~trans_pcie->inta_mask); 1455e705c121SKalle Valo } 1456e705c121SKalle Valo 1457e705c121SKalle Valo /* Re-enable all interrupts */ 1458e705c121SKalle Valo /* only Re-enable if disabled by irq */ 1459e705c121SKalle Valo if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1460e705c121SKalle Valo iwl_enable_interrupts(trans); 1461e705c121SKalle Valo /* Re-enable RF_KILL if it occurred */ 1462e705c121SKalle Valo else if (handled & CSR_INT_BIT_RF_KILL) 1463e705c121SKalle Valo iwl_enable_rfkill_int(trans); 1464e705c121SKalle Valo 1465e705c121SKalle Valo out: 1466e705c121SKalle Valo lock_map_release(&trans->sync_cmd_lockdep_map); 1467e705c121SKalle Valo return IRQ_HANDLED; 1468e705c121SKalle Valo } 1469e705c121SKalle Valo 1470e705c121SKalle Valo /****************************************************************************** 1471e705c121SKalle Valo * 1472e705c121SKalle Valo * ICT functions 1473e705c121SKalle Valo * 1474e705c121SKalle Valo ******************************************************************************/ 1475e705c121SKalle Valo 1476e705c121SKalle Valo /* Free dram table */ 1477e705c121SKalle Valo void iwl_pcie_free_ict(struct iwl_trans *trans) 1478e705c121SKalle Valo { 1479e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1480e705c121SKalle Valo 1481e705c121SKalle Valo if (trans_pcie->ict_tbl) { 1482e705c121SKalle Valo dma_free_coherent(trans->dev, ICT_SIZE, 1483e705c121SKalle Valo trans_pcie->ict_tbl, 1484e705c121SKalle Valo trans_pcie->ict_tbl_dma); 1485e705c121SKalle Valo trans_pcie->ict_tbl = NULL; 1486e705c121SKalle Valo trans_pcie->ict_tbl_dma = 0; 1487e705c121SKalle Valo } 1488e705c121SKalle Valo } 1489e705c121SKalle Valo 1490e705c121SKalle Valo /* 1491e705c121SKalle Valo * allocate dram shared table, it is an aligned memory 1492e705c121SKalle Valo * block of ICT_SIZE. 1493e705c121SKalle Valo * also reset all data related to ICT table interrupt. 1494e705c121SKalle Valo */ 1495e705c121SKalle Valo int iwl_pcie_alloc_ict(struct iwl_trans *trans) 1496e705c121SKalle Valo { 1497e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1498e705c121SKalle Valo 1499e705c121SKalle Valo trans_pcie->ict_tbl = 1500e705c121SKalle Valo dma_zalloc_coherent(trans->dev, ICT_SIZE, 1501e705c121SKalle Valo &trans_pcie->ict_tbl_dma, 1502e705c121SKalle Valo GFP_KERNEL); 1503e705c121SKalle Valo if (!trans_pcie->ict_tbl) 1504e705c121SKalle Valo return -ENOMEM; 1505e705c121SKalle Valo 1506e705c121SKalle Valo /* just an API sanity check ... it is guaranteed to be aligned */ 1507e705c121SKalle Valo if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 1508e705c121SKalle Valo iwl_pcie_free_ict(trans); 1509e705c121SKalle Valo return -EINVAL; 1510e705c121SKalle Valo } 1511e705c121SKalle Valo 1512e705c121SKalle Valo return 0; 1513e705c121SKalle Valo } 1514e705c121SKalle Valo 1515e705c121SKalle Valo /* Device is going up inform it about using ICT interrupt table, 1516e705c121SKalle Valo * also we need to tell the driver to start using ICT interrupt. 1517e705c121SKalle Valo */ 1518e705c121SKalle Valo void iwl_pcie_reset_ict(struct iwl_trans *trans) 1519e705c121SKalle Valo { 1520e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1521e705c121SKalle Valo u32 val; 1522e705c121SKalle Valo 1523e705c121SKalle Valo if (!trans_pcie->ict_tbl) 1524e705c121SKalle Valo return; 1525e705c121SKalle Valo 1526e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 1527e705c121SKalle Valo iwl_disable_interrupts(trans); 1528e705c121SKalle Valo 1529e705c121SKalle Valo memset(trans_pcie->ict_tbl, 0, ICT_SIZE); 1530e705c121SKalle Valo 1531e705c121SKalle Valo val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; 1532e705c121SKalle Valo 1533e705c121SKalle Valo val |= CSR_DRAM_INT_TBL_ENABLE | 1534e705c121SKalle Valo CSR_DRAM_INIT_TBL_WRAP_CHECK | 1535e705c121SKalle Valo CSR_DRAM_INIT_TBL_WRITE_POINTER; 1536e705c121SKalle Valo 1537e705c121SKalle Valo IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); 1538e705c121SKalle Valo 1539e705c121SKalle Valo iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); 1540e705c121SKalle Valo trans_pcie->use_ict = true; 1541e705c121SKalle Valo trans_pcie->ict_index = 0; 1542e705c121SKalle Valo iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); 1543e705c121SKalle Valo iwl_enable_interrupts(trans); 1544e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1545e705c121SKalle Valo } 1546e705c121SKalle Valo 1547e705c121SKalle Valo /* Device is going down disable ict interrupt usage */ 1548e705c121SKalle Valo void iwl_pcie_disable_ict(struct iwl_trans *trans) 1549e705c121SKalle Valo { 1550e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1551e705c121SKalle Valo 1552e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 1553e705c121SKalle Valo trans_pcie->use_ict = false; 1554e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 1555e705c121SKalle Valo } 1556e705c121SKalle Valo 1557e705c121SKalle Valo irqreturn_t iwl_pcie_isr(int irq, void *data) 1558e705c121SKalle Valo { 1559e705c121SKalle Valo struct iwl_trans *trans = data; 1560e705c121SKalle Valo 1561e705c121SKalle Valo if (!trans) 1562e705c121SKalle Valo return IRQ_NONE; 1563e705c121SKalle Valo 1564e705c121SKalle Valo /* Disable (but don't clear!) interrupts here to avoid 1565e705c121SKalle Valo * back-to-back ISRs and sporadic interrupts from our NIC. 1566e705c121SKalle Valo * If we have something to service, the tasklet will re-enable ints. 1567e705c121SKalle Valo * If we *don't* have something, we'll re-enable before leaving here. 1568e705c121SKalle Valo */ 1569e705c121SKalle Valo iwl_write32(trans, CSR_INT_MASK, 0x00000000); 1570e705c121SKalle Valo 1571e705c121SKalle Valo return IRQ_WAKE_THREAD; 1572e705c121SKalle Valo } 1573