// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers
 * for the device to read from or fill. Driver and device exchange the
 * status of each queue via "read" and "write" pointers. The driver keeps a
 * minimum of 2 empty entries in each circular buffer, to protect against
 * confusing empty and full queue states.
 *
 * The device reads or writes the data in the queues via the device's
 * several DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after
 * queuing a packet for Tx, free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
 * becomes > high mark, the Tx queue is resumed.
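 *
 * For example, with a 256-entry circular buffer and the 2-entry reserve,
 * at most 254 descriptors may be in use at once: read == write must keep
 * the single, unambiguous meaning "queue empty", so write is never
 * allowed to wrap all the way around onto read.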
 *
 ***************************************************/

int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up the NIC if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
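	 *
	 * Note the encoding of the write below: the queue id goes into
	 * bits 8+ of HBUS_TARG_WRPTR and the new write index into the low
	 * byte, e.g. index 0x2a on queue 4 is written as 0x042a.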
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
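
	/*
	 * Drop the MAC_ACCESS_REQ reference taken by
	 * iwl_pcie_set_cmd_in_flight(); like there, this is only needed on
	 * families with the APMG wake-up workaround.
	 */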
	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;

	spin_lock(&trans_pcie->reg_lock);

	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
		spin_unlock(&trans_pcie->reg_lock);
		return;
	}

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	spin_unlock(&trans_pcie->reg_lock);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr &&
		    txq_id == trans->txqs.cmd.q_id)
			iwl_pcie_clear_cmd_in_flight(trans);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq_id: index of the transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
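 * (The struct iwl_txq itself is part of the trans_pcie->txq_memory array
 * and is only freed later, by iwl_pcie_tx_free().)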
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
			    SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];

		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
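	 * iwl_pcie_tx_start() treats 0 specially: it re-reads the real
	 * base address from SCD_SRAM_BASE_ADDR and only warns about a
	 * mismatch when a non-zero address was passed in.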
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
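	 * Queue memory itself is not released here; that is done by
	 * iwl_pcie_tx_free().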
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail.
	 */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_bh(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell NIC where to find the circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Make sure the NIC is still alive on the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return 0;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set (see above).
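	 * The reference taken here is dropped again by
	 * iwl_pcie_clear_cmd_in_flight() once the command queue has fully
	 * drained (see iwl_pcie_cmdq_reclaim()).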
	 */
	if (!_iwl_trans_pcie_grab_nic_access(trans))
		return -EIO;

	/*
	 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
	 * There, we also returned immediately if cmd_hold_nic_awake is
	 * already true, so it's OK to unconditionally set it to true.
	 */
	trans_pcie->cmd_hold_nic_awake = true;
	spin_unlock(&trans_pcie->reg_lock);

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr)
		iwl_pcie_clear_cmd_in_flight(trans);

	iwl_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
		      SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
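			 * The caller-supplied ssn is overridden below, so
			 * the queue simply continues from its current read
			 * pointer.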
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
				      trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
				      SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
				      SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
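 *
 * Note the space check below: one free slot is kept in reserve for
 * synchronous commands, so an async command is only accepted while at
 * least two slots are free.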
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	unsigned long flags;

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
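			 * Note that NOCOPY/DUP chunks still count towards
			 * cmd_size (the total command length) but not
			 * towards copy_size (the bytes placed in the static
			 * buffer).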
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_irqsave(&txq->lock, flags);

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&txq->lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
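		 * Of this copy, only the bytes accounted into copy_size
		 * below (capped at IWL_FIRST_TB_SIZE in total) are mapped
		 * for the device; the remainder exists purely so debug
		 * dumps show the full command.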
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		void *data = (void *)(uintptr_t)cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
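		/* for DUP chunks this maps our own copy, not the caller's */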
		phys_addr = dma_map_single(trans->dev, data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&txq->lock, flags);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code.
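	 * (The queue id is recovered from the response's sequence field
	 * via SEQ_TO_QUEUE(), mirroring QUEUE_TO_SEQ() used on enqueue.)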
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);

	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}
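/*
 * Data path below: iwl_fill_data_tbs() maps the linear remainder of
 * the skb head (everything past the 802.11 header) as one TB, then
 * gives each page fragment its own TB, so no payload bytes are copied
 * on the fast path.
 */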
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}
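/*
 * out_meta->tbs is a bitmap of the TBs that came from page fragments:
 * on reclaim, iwl_txq_gen1_tfd_unmap() uses it to pick
 * dma_unmap_page() for those entries and dma_unmap_single() for the
 * rest, so each TB is unmapped the same way it was mapped.
 */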
#ifdef CONFIG_INET
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);
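	/*
	 * Worked example of the hdr_room math above (illustrative
	 * numbers): an 8000-byte TSO payload with mss 1600 gives
	 * DIV_ROUND_UP(8000, 1600) = 5 subframes; with IPv4 (20) +
	 * TCP (20) + SNAP (8) headers, each subframe needs at most
	 * 3 (pad) + 48 + 14 (ethhdr) = 65 header bytes, so
	 * hdr_room = 5 * 65 + iv_len -- comfortably within the single
	 * page the comment above relies on.
	 */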
	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
			return -EINVAL;
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
				return -EINVAL;

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
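	/*
	 * Note: the -EINVAL returns in the loop above leave any TBs
	 * already built in place; the caller's out_err path unmaps the
	 * whole TFD under construction via iwl_txq_gen1_tfd_unmap(),
	 * so nothing stays DMA-mapped on failure.
	 */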
	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);
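	/*
	 * Flow control, summarizing the logic below: once free space in
	 * the ring drops under high_mark the queue is stopped towards
	 * mac80211, and if fewer than 3 TFDs remain the frame is parked
	 * on overflow_q instead of the ring; reclaim later drains
	 * overflow_q and restarts the queue.
	 */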
	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}
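	/*
	 * Example of the alignment above (illustrative numbers): a
	 * 26-byte QoS header can make len come out non-dword-aligned,
	 * e.g. len = 62 gives tb1_len = ALIGN(62, 4) = 64, and
	 * TX_CMD_FLG_MH_PAD tells the firmware that 2 padding bytes
	 * follow the MAC header.
	 */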
	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);
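	/*
	 * TFD layout for a data frame at this point: TB0 covers the
	 * first IWL_FIRST_TB_SIZE bytes of dev_cmd (copied into
	 * first_tb_bufs only after the A-MSDU build below, since that
	 * can still change tx_cmd->len), TB1 maps the rest of the TX
	 * command plus the 802.11 header, and the payload TBs follow.
	 */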
	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}