// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'Tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
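/*
 * Editor's sketch (not part of the driver): the circular-buffer accounting
 * described above, spelled out for a power-of-2 queue size with free-running
 * read/write indices. The helper name and the explicit "2 reserved entries"
 * are illustrative assumptions, not the driver's actual space computation.
 */
static inline int iwl_example_queue_free_slots(int write_ptr, int read_ptr,
					       int q_size)
{
	int used = (write_ptr - read_ptr) & (q_size - 1);

	/* keep 2 entries empty so a full queue never looks empty */
	return q_size - used - 2;
}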
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
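/*
 * Usage sketch (hypothetical caller, not part of the driver): the pair of
 * helpers above ties a coherent DMA buffer to its bus address in a single
 * struct iwl_dma_ptr, so a caller can program ptr->dma into the device and
 * later release the buffer through the same handle. The function name and
 * the 4 KiB size are illustrative.
 */
static inline int iwl_example_scratch_cycle(struct iwl_trans *trans,
					    struct iwl_dma_ptr *scratch)
{
	int ret = iwl_pcie_alloc_dma_ptr(trans, scratch, 4096);

	if (ret)
		return ret;

	/* ... hand scratch->dma to the device here ... */

	iwl_pcie_free_dma_ptr(trans, scratch);
	return 0;
}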
/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}
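/*
 * Decoding sketch (illustration only): iwl_pcie_tfd_set_tb() above packs a
 * 36-bit DMA address and a 12-bit length into lo/hi_n_len, with address
 * bits 32..35 in the low nibble of hi_n_len. The inverse mapping, written
 * out for reference; the helper name is hypothetical and the byte order of
 * the on-descriptor fields is ignored here.
 */
static inline void iwl_example_tb_unpack(u32 lo, u16 hi_n_len,
					 u64 *addr, u16 *len)
{
	*addr = (u64)lo | ((u64)(hi_n_len & 0xf) << 32);
	*len = hi_n_len >> 4;
}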
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];

		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find the circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
						   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}
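/*
 * Index-walk sketch (illustration only) for the reclaim helper below: it
 * advances the read pointer entry by entry from the old 'R' index up to,
 * but not including, the index the firmware completed, with wraparound:
 *
 *	for (r = old_read_ptr; r != completed_idx; r = inc_wrap(r))
 *		reclaim entry r;
 *
 * On the command queue, freeing more than one entry per completion means
 * responses were skipped, which is why the real code fires an NMI then.
 */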
/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	unsigned long flags;
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
		      SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into the station table),
 * combined with Traffic ID (QoS priority), in the format used by the
 * Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
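/*
 * Packing sketch (illustration only): BUILD_RAxTID() above puts the station
 * index in bits 4 and up and the TID in the low 4 bits. The inverse, with
 * hypothetical helper names, purely for reference:
 */
static inline u8 iwl_example_ra_tid_sta(u16 ra_tid)
{
	return ra_tid >> 4;	/* station table index */
}

static inline u8 iwl_example_ra_tid_tid(u16 ra_tid)
{
	return ra_tid & 0xf;	/* QoS traffic ID */
}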
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}
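	/*
	 * Worked instance of the workaround above (illustration only):
	 * with write_ptr == 0x100 and ssn == 0x140 on a pre-9000 device,
	 * (ssn - write_ptr) == 0x40, so (0x40 & 0x3f) == 0 and the pointers
	 * differ; scd_bug is set and ssn is bumped to 0x141 so the SCD
	 * write pointer doesn't move by an exact multiple of 0x40.
	 */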
	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
				      trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
				      SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
				      SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}
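/*
 * Encoding sketch (illustration only): the HBUS_TARG_WRPTR writes above and
 * in iwl_pcie_txq_inc_wr_ptr() put the queue index in the low byte and the
 * queue id in the next byte. The helper name is hypothetical.
 */
static inline u32 iwl_example_wrptr_val(int txq_id, int index)
{
	return (index & 0xff) | (txq_id << 8);
}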
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}
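	/*
	 * Descriptive note: at this point copy_size counts only the bytes
	 * that will live in the contiguous command buffer (header plus all
	 * copied chunks, with the IWL_FIRST_TB_SIZE adjustment above), while
	 * cmd_size is the full logical command length including NOCOPY/DUP
	 * chunks that keep their own buffers and get their own TBs.
	 */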
	spin_lock_bh(&txq->lock);

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}
	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}
	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}
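/*
 * A sketch of the sequence-field packing that SEQ_TO_QUEUE() and
 * SEQ_TO_INDEX() undo above, assuming the usual iwl-trans.h definitions
 * (queue id in bits 8..12, ring index in bits 0..7):
 *
 *	sequence = ((txq_id & 0x1f) << 8) | (write_ptr & 0xff);
 *
 * This is also what iwl_pcie_enqueue_hcmd() wrote into the header, so a
 * completion can always be routed back to the TFD that produced it.
 */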
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_trans_pcie_dump_regs(trans);
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive on the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We can still fail on RFKILL, which may be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}
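/*
 * Typical usage, sketched; the group/opcode pair and payload below are
 * hypothetical, only the field names and flags follow this file:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(group, opcode),
 *		.len = { sizeof(payload), },
 *		.data = { &payload, },
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_pcie_send_hcmd(trans, &hcmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&hcmd);
 *
 * Callers that set CMD_ASYNC must not set CMD_WANT_SKB (see the WARN_ON
 * in iwl_pcie_send_hcmd_async() above).
 */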
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}
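/*
 * Each chunk mapped above consumes one TB slot; out_meta->tbs records
 * which slots hold driver-mapped fragments so that
 * iwl_txq_gen1_tfd_unmap() can later unmap exactly those, and the
 * BUILD_BUG_ON against IWL_TFH_NUM_TBS earlier in this file guarantees
 * the bitmap is wide enough.
 */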
#ifdef CONFIG_INET
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
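	/*
	 * Worked example with illustrative numbers: for total_len = 4200
	 * and mss = 1400, DIV_ROUND_UP(4200, 1400) = 3 subframes are
	 * needed, and each subframe's headers take at most 3 bytes of
	 * alignment pad plus sizeof(struct ethhdr) plus snap_ip_tcp_hdrlen
	 * bytes -- hence the multiplication above, with iv_len added once
	 * up front.
	 */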
	/* Our device supports up to 9 segments, so it will fit in one page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);
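		/*
		 * The bytes emitted so far form the fixed part of one
		 * A-MSDU subframe:
		 *
		 *	| DA (6) | SA (6) | length (2) | SNAP/IP/TCP | data |
		 *
		 * with up to 3 bytes of padding (amsdu_pad) inserted before
		 * the next subframe so each one starts on a 4-byte boundary.
		 */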
		/*
		 * This will copy the SNAP as well, which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */
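/*
 * High-level shape of the TFD that the function below builds for a data
 * frame (a summary of the code that follows, for orientation): TB0 is
 * the IWL_FIRST_TB_SIZE-byte bi-directional scratch copied from dev_cmd,
 * TB1 covers the rest of the TX command plus the 802.11 header, and the
 * remaining TBs map the payload -- either directly from the skb or from
 * the A-MSDU header page built above.
 */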
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}
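	/*
	 * Note on the overflow path above: the dev_cmd pointer is stashed
	 * in skb->cb at trans->txqs.dev_cmd_offs so that the frame can be
	 * re-driven from overflow_q once reclaim frees ring entries; until
	 * then the queue stays stopped towards mac80211.
	 */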
	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to
	 * parse the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}
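	/*
	 * Sizing example for TB1 (IWL_FIRST_TB_SIZE is 20 bytes on this
	 * generation): with a 26-byte QoS data header,
	 * len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header)
	 * + 26 - 20; for the non-A-MSDU case that is then rounded up to a
	 * multiple of 4, and TX_CMD_FLG_MH_PAD advertises the implied
	 * 2-byte pad after the MAC header.
	 */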
	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));
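	/*
	 * The update above feeds the scheduler's byte-count table: the
	 * frame length is recorded at the write_ptr slot so the hardware
	 * can track how much data each TFD carries (the table format lives
	 * in the gen1 queue code, see iwl_txq_gen1_update_byte_cnt_tbl()).
	 */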
	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer; if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station
		 * wakes up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}