/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free space
 * rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
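/*
 * Illustrative sketch (not driver code): how free space falls out of the
 * read/write pointer scheme described above, assuming a power-of-two queue
 * size and the 2-entry guard band. The function name and parameters below
 * are made up for the example; the real computation lives in iwl_txq_space().
 */
static inline int iwl_example_txq_free_space(u32 write_ptr, u32 read_ptr,
					     u32 size /* power of two */)
{
	/* entries currently in use, modulo the queue size */
	int used = (write_ptr - read_ptr) & (size - 1);

	/* two entries stay empty so a full queue never looks empty */
	return (int)(size - 2) - used;
}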
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
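/*
 * Usage note: the two helpers above are always used as a pair on the same
 * iwl_dma_ptr - see iwl_pcie_tx_alloc() and iwl_pcie_tx_free() below, which
 * use them for the scheduler byte-count tables and the keep-warm buffer.
 */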
/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}
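/*
 * Layout note for iwl_pcie_tfd_set_tb() above: the legacy TFD carries a
 * 36-bit DMA address per TB. The low 32 address bits go into tb->lo, while
 * hi_n_len packs the high 4 address bits into bits 0..3 and the buffer
 * length into bits 4..15 (hence the "len << 4" above).
 */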
/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_txq_get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}
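/*
 * Usage sketch (illustrative, not a real call site): a TFD is built one TB
 * at a time; the first call passes reset = true to zero the descriptor, and
 * later calls append further TBs up to the max_tbs limit checked above:
 *
 *	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, tb0_len, true);
 *	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 */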
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];

		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}
/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}
/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/*
	 * It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail.
	 */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}
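/*
 * Note: iwl_pcie_tx_alloc() above allocates the DMA memory only when
 * trans_pcie->txq_memory is still NULL, so iwl_pcie_tx_init() can be called
 * again across start_hw/stop_device cycles and will reuse the existing
 * allocation instead of allocating twice.
 */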
static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
	int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);
	/*
	 * Since we free until the index, _not_ inclusive, the one before it
	 * is the last we will free. That one must be a used entry.
	 */
	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

	if (!iwl_txq_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->trans_cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_txq_free_tso_page(trans, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->trans_cfg->use_tfh)
			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in the reclaim path, which is non
		 * re-entrant, so no one else will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx going on
		 * either. Bottom line: we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->txqs.dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

/* Set wr_ptr of specific device and txq */
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * have returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
						   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}
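/*
 * Note: iwl_pcie_set_cmd_in_flight() above pairs with
 * iwl_pcie_clear_cmd_in_flight() earlier in this file: the former holds the
 * NIC awake (MAC_ACCESS_REQ) while host commands are in flight, and the
 * latter drops that request once the command queue drains.
 */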
/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	unsigned long flags;
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(txq);
}
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
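/*
 * Example (illustrative): BUILD_RAxTID(5, 2) yields (5 << 4) + 2 = 0x52.
 * iwl_pcie_txq_set_ratid_map() above stores the masked value in the
 * scheduler's translation table, packing two queues per 32-bit word: odd
 * queue IDs in the high 16 bits, even ones in the low 16 bits.
 */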
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}
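	/*
	 * Worked example (illustrative) for the workaround above: with
	 * txq->write_ptr == 0 and ssn == 0x40, (ssn - write_ptr) & 0x3f is
	 * zero and ssn != write_ptr, so scd_bug is set and ssn is bumped to
	 * 0x41 before being programmed below.
	 */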
	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. For the sake of simplicity in the op_mode, allow
	 * the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
For the sake of simplicity,
1131e705c121SKalle Valo 	 * allow the op_mode to call txq_disable after it has already
1132e705c121SKalle Valo 	 * called stop_device.
1133e705c121SKalle Valo 	 */
11344f4822b7SMordechay Goodstein 	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
1135e705c121SKalle Valo 		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1136e705c121SKalle Valo 			  "queue %d not used", txq_id);
1137e705c121SKalle Valo 		return;
1138e705c121SKalle Valo 	}
1139e705c121SKalle Valo 
1140e705c121SKalle Valo 	if (configure_scd) {
1141e705c121SKalle Valo 		iwl_scd_txq_set_inactive(trans, txq_id);
1142e705c121SKalle Valo 
1143e705c121SKalle Valo 		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
1144e705c121SKalle Valo 				    ARRAY_SIZE(zero_val));
1145e705c121SKalle Valo 	}
1146e705c121SKalle Valo 
1147e705c121SKalle Valo 	iwl_pcie_txq_unmap(trans, txq_id);
11484f4822b7SMordechay Goodstein 	trans->txqs.txq[txq_id]->ampdu = false;
1149e705c121SKalle Valo 
1150e705c121SKalle Valo 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1151e705c121SKalle Valo }
1152e705c121SKalle Valo 
1153e705c121SKalle Valo /*************** HOST COMMAND QUEUE FUNCTIONS *****/
1154e705c121SKalle Valo 
1155e705c121SKalle Valo /*
1156e705c121SKalle Valo  * iwl_pcie_enqueue_hcmd - enqueue a uCode command
1157e705c121SKalle Valo  * @trans: transport context
1158e705c121SKalle Valo  * @cmd: a pointer to the ucode command structure
1159e705c121SKalle Valo  *
1160e705c121SKalle Valo  * The function returns a negative value on failure; on success it
1161e705c121SKalle Valo  * returns the index (>= 0) of the command in the
1162e705c121SKalle Valo  * command queue.
1163e705c121SKalle Valo  */
1164e705c121SKalle Valo static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1165e705c121SKalle Valo 				 struct iwl_host_cmd *cmd)
1166e705c121SKalle Valo {
1167e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
11684f4822b7SMordechay Goodstein 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1169e705c121SKalle Valo 	struct iwl_device_cmd *out_cmd;
1170e705c121SKalle Valo 	struct iwl_cmd_meta *out_meta;
1171e705c121SKalle Valo 	unsigned long flags;
1172e705c121SKalle Valo 	void *dup_buf = NULL;
1173e705c121SKalle Valo 	dma_addr_t phys_addr;
1174e705c121SKalle Valo 	int idx;
11758de437c7SSara Sharon 	u16 copy_size, cmd_size, tb0_size;
1176e705c121SKalle Valo 	bool had_nocopy = false;
1177e705c121SKalle Valo 	u8 group_id = iwl_cmd_groupid(cmd->id);
1178e705c121SKalle Valo 	int i, ret;
1179e705c121SKalle Valo 	u32 cmd_pos;
1180e705c121SKalle Valo 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1181e705c121SKalle Valo 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1182e705c121SKalle Valo 
1183e705c121SKalle Valo 	if (group_id != 0) {
1184e705c121SKalle Valo 		copy_size = sizeof(struct iwl_cmd_header_wide);
1185e705c121SKalle Valo 		cmd_size = sizeof(struct iwl_cmd_header_wide);
1186e705c121SKalle Valo 	} else {
1187e705c121SKalle Valo 		copy_size = sizeof(struct iwl_cmd_header);
1188e705c121SKalle Valo 		cmd_size = sizeof(struct iwl_cmd_header);
1189e705c121SKalle Valo 	}
1190e705c121SKalle Valo 
1191e705c121SKalle Valo 	/* need one for the header if the first is NOCOPY */
1192e705c121SKalle Valo 	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1193e705c121SKalle Valo 
1194e705c121SKalle Valo 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1195e705c121SKalle Valo 		cmddata[i] = cmd->data[i];
1196e705c121SKalle Valo 		cmdlen[i] = cmd->len[i];
1197e705c121SKalle Valo 
1198e705c121SKalle
Valo if (!cmd->len[i]) 1199e705c121SKalle Valo continue; 1200e705c121SKalle Valo 12018de437c7SSara Sharon /* need at least IWL_FIRST_TB_SIZE copied */ 12028de437c7SSara Sharon if (copy_size < IWL_FIRST_TB_SIZE) { 12038de437c7SSara Sharon int copy = IWL_FIRST_TB_SIZE - copy_size; 1204e705c121SKalle Valo 1205e705c121SKalle Valo if (copy > cmdlen[i]) 1206e705c121SKalle Valo copy = cmdlen[i]; 1207e705c121SKalle Valo cmdlen[i] -= copy; 1208e705c121SKalle Valo cmddata[i] += copy; 1209e705c121SKalle Valo copy_size += copy; 1210e705c121SKalle Valo } 1211e705c121SKalle Valo 1212e705c121SKalle Valo if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1213e705c121SKalle Valo had_nocopy = true; 1214e705c121SKalle Valo if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1215e705c121SKalle Valo idx = -EINVAL; 1216e705c121SKalle Valo goto free_dup_buf; 1217e705c121SKalle Valo } 1218e705c121SKalle Valo } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1219e705c121SKalle Valo /* 1220e705c121SKalle Valo * This is also a chunk that isn't copied 1221e705c121SKalle Valo * to the static buffer so set had_nocopy. 1222e705c121SKalle Valo */ 1223e705c121SKalle Valo had_nocopy = true; 1224e705c121SKalle Valo 1225e705c121SKalle Valo /* only allowed once */ 1226e705c121SKalle Valo if (WARN_ON(dup_buf)) { 1227e705c121SKalle Valo idx = -EINVAL; 1228e705c121SKalle Valo goto free_dup_buf; 1229e705c121SKalle Valo } 1230e705c121SKalle Valo 1231e705c121SKalle Valo dup_buf = kmemdup(cmddata[i], cmdlen[i], 1232e705c121SKalle Valo GFP_ATOMIC); 1233e705c121SKalle Valo if (!dup_buf) 1234e705c121SKalle Valo return -ENOMEM; 1235e705c121SKalle Valo } else { 1236e705c121SKalle Valo /* NOCOPY must not be followed by normal! */ 1237e705c121SKalle Valo if (WARN_ON(had_nocopy)) { 1238e705c121SKalle Valo idx = -EINVAL; 1239e705c121SKalle Valo goto free_dup_buf; 1240e705c121SKalle Valo } 1241e705c121SKalle Valo copy_size += cmdlen[i]; 1242e705c121SKalle Valo } 1243e705c121SKalle Valo cmd_size += cmd->len[i]; 1244e705c121SKalle Valo } 1245e705c121SKalle Valo 1246e705c121SKalle Valo /* 1247e705c121SKalle Valo * If any of the command structures end up being larger than 1248e705c121SKalle Valo * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1249e705c121SKalle Valo * allocated into separate TFDs, then we will need to 1250e705c121SKalle Valo * increase the size of the buffers. 1251e705c121SKalle Valo */ 1252e705c121SKalle Valo if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1253e705c121SKalle Valo "Command %s (%#x) is too large (%d bytes)\n", 125439bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 125539bdb17eSSharon Dvir cmd->id, copy_size)) { 1256e705c121SKalle Valo idx = -EINVAL; 1257e705c121SKalle Valo goto free_dup_buf; 1258e705c121SKalle Valo } 1259e705c121SKalle Valo 1260e705c121SKalle Valo spin_lock_bh(&txq->lock); 1261e705c121SKalle Valo 12620cd1ad2dSMordechay Goodstein if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { 1263e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1264e705c121SKalle Valo 1265e705c121SKalle Valo IWL_ERR(trans, "No space in command queue\n"); 1266e705c121SKalle Valo iwl_op_mode_cmd_queue_full(trans->op_mode); 1267e705c121SKalle Valo idx = -ENOSPC; 1268e705c121SKalle Valo goto free_dup_buf; 1269e705c121SKalle Valo } 1270e705c121SKalle Valo 12710cd1ad2dSMordechay Goodstein idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 1272e705c121SKalle Valo out_cmd = txq->entries[idx].cmd; 1273e705c121SKalle Valo out_meta = &txq->entries[idx].meta; 1274e705c121SKalle Valo 1275e705c121SKalle Valo memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1276e705c121SKalle Valo if (cmd->flags & CMD_WANT_SKB) 1277e705c121SKalle Valo out_meta->source = cmd; 1278e705c121SKalle Valo 1279e705c121SKalle Valo /* set up the header */ 1280e705c121SKalle Valo if (group_id != 0) { 1281e705c121SKalle Valo out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1282e705c121SKalle Valo out_cmd->hdr_wide.group_id = group_id; 1283e705c121SKalle Valo out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1284e705c121SKalle Valo out_cmd->hdr_wide.length = 1285e705c121SKalle Valo cpu_to_le16(cmd_size - 1286e705c121SKalle Valo sizeof(struct iwl_cmd_header_wide)); 1287e705c121SKalle Valo out_cmd->hdr_wide.reserved = 0; 1288e705c121SKalle Valo out_cmd->hdr_wide.sequence = 12894f4822b7SMordechay Goodstein cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | 1290bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr)); 1291e705c121SKalle Valo 1292e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header_wide); 1293e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header_wide); 1294e705c121SKalle Valo } else { 1295e705c121SKalle Valo out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1296e705c121SKalle Valo out_cmd->hdr.sequence = 12974f4822b7SMordechay Goodstein cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | 1298bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr)); 1299e705c121SKalle Valo out_cmd->hdr.group_id = 0; 1300e705c121SKalle Valo 1301e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header); 1302e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header); 1303e705c121SKalle Valo } 1304e705c121SKalle Valo 1305e705c121SKalle Valo /* and copy the data that needs to be copied */ 1306e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1307e705c121SKalle Valo int copy; 1308e705c121SKalle Valo 1309e705c121SKalle Valo if (!cmd->len[i]) 1310e705c121SKalle Valo continue; 1311e705c121SKalle Valo 1312e705c121SKalle Valo /* copy everything if not nocopy/dup */ 1313e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1314e705c121SKalle Valo IWL_HCMD_DFL_DUP))) { 1315e705c121SKalle Valo copy = cmd->len[i]; 1316e705c121SKalle Valo 1317e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1318e705c121SKalle Valo cmd_pos += copy; 1319e705c121SKalle Valo copy_size += copy; 1320e705c121SKalle Valo continue; 1321e705c121SKalle Valo } 1322e705c121SKalle Valo 1323e705c121SKalle Valo /* 13248de437c7SSara Sharon * Otherwise we need at least IWL_FIRST_TB_SIZE copied 13258de437c7SSara Sharon * in total (for bi-directional DMA), but copy up to what 1326e705c121SKalle Valo * we can fit into the payload for debug dump purposes. 
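		 * For example, with the short 4-byte command header and a
		 * single NOCOPY chunk, only 16 bytes of that chunk count
		 * toward copy_size (filling the 20-byte TB0), while up to
		 * TFD_MAX_PAYLOAD_SIZE - cmd_pos bytes of it are memcpy'd
		 * here purely for the dump.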
1327e705c121SKalle Valo */ 1328e705c121SKalle Valo copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1329e705c121SKalle Valo 1330e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1331e705c121SKalle Valo cmd_pos += copy; 1332e705c121SKalle Valo 1333e705c121SKalle Valo /* However, treat copy_size the proper way, we need it below */ 13348de437c7SSara Sharon if (copy_size < IWL_FIRST_TB_SIZE) { 13358de437c7SSara Sharon copy = IWL_FIRST_TB_SIZE - copy_size; 1336e705c121SKalle Valo 1337e705c121SKalle Valo if (copy > cmd->len[i]) 1338e705c121SKalle Valo copy = cmd->len[i]; 1339e705c121SKalle Valo copy_size += copy; 1340e705c121SKalle Valo } 1341e705c121SKalle Valo } 1342e705c121SKalle Valo 1343e705c121SKalle Valo IWL_DEBUG_HC(trans, 1344e705c121SKalle Valo "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 134539bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 1346e705c121SKalle Valo group_id, out_cmd->hdr.cmd, 1347e705c121SKalle Valo le16_to_cpu(out_cmd->hdr.sequence), 13484f4822b7SMordechay Goodstein cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); 1349e705c121SKalle Valo 13508de437c7SSara Sharon /* start the TFD with the minimum copy bytes */ 13518de437c7SSara Sharon tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 13528de437c7SSara Sharon memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1353e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, 13540cd1ad2dSMordechay Goodstein iwl_txq_get_first_tb_dma(txq, idx), 13558de437c7SSara Sharon tb0_size, true); 1356e705c121SKalle Valo 1357e705c121SKalle Valo /* map first command fragment, if any remains */ 13588de437c7SSara Sharon if (copy_size > tb0_size) { 1359e705c121SKalle Valo phys_addr = dma_map_single(trans->dev, 13608de437c7SSara Sharon ((u8 *)&out_cmd->hdr) + tb0_size, 13618de437c7SSara Sharon copy_size - tb0_size, 1362e705c121SKalle Valo DMA_TO_DEVICE); 1363e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) { 13640179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, 1365bb98ecd4SSara Sharon txq->write_ptr); 1366e705c121SKalle Valo idx = -ENOMEM; 1367e705c121SKalle Valo goto out; 1368e705c121SKalle Valo } 1369e705c121SKalle Valo 1370e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 13718de437c7SSara Sharon copy_size - tb0_size, false); 1372e705c121SKalle Valo } 1373e705c121SKalle Valo 1374e705c121SKalle Valo /* map the remaining (adjusted) nocopy/dup fragments */ 1375e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1376e705c121SKalle Valo const void *data = cmddata[i]; 1377e705c121SKalle Valo 1378e705c121SKalle Valo if (!cmdlen[i]) 1379e705c121SKalle Valo continue; 1380e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1381e705c121SKalle Valo IWL_HCMD_DFL_DUP))) 1382e705c121SKalle Valo continue; 1383e705c121SKalle Valo if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1384e705c121SKalle Valo data = dup_buf; 1385e705c121SKalle Valo phys_addr = dma_map_single(trans->dev, (void *)data, 1386e705c121SKalle Valo cmdlen[i], DMA_TO_DEVICE); 1387e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) { 13880179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, 1389bb98ecd4SSara Sharon txq->write_ptr); 1390e705c121SKalle Valo idx = -ENOMEM; 1391e705c121SKalle Valo goto out; 1392e705c121SKalle Valo } 1393e705c121SKalle Valo 1394e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1395e705c121SKalle Valo } 1396e705c121SKalle Valo 
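	/*
	 * At this point the command's TFD is fully built. As an
	 * illustration, a wide-header command carrying 40 copied bytes
	 * plus one 512-byte NOCOPY chunk ends up as:
	 *
	 *	TB0: 20 bytes (IWL_FIRST_TB_SIZE) out of first_tb_bufs,
	 *	     i.e. the 8-byte wide header and the first 12 bytes
	 *	     of payload,
	 *	TB1: the remaining 28 copied bytes, mapped from out_cmd,
	 *	TB2: the 512-byte NOCOPY buffer, mapped in place.
	 */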
13973cd1980bSSara Sharon 	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1398e705c121SKalle Valo 	out_meta->flags = cmd->flags;
1399e705c121SKalle Valo 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1400453431a5SWaiman Long 		kfree_sensitive(txq->entries[idx].free_buf);
1401e705c121SKalle Valo 	txq->entries[idx].free_buf = dup_buf;
1402e705c121SKalle Valo 
1403e705c121SKalle Valo 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1404e705c121SKalle Valo 
1405e705c121SKalle Valo 	/* start timer if queue currently empty */
1406bb98ecd4SSara Sharon 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1407e705c121SKalle Valo 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1408e705c121SKalle Valo 
1409e705c121SKalle Valo 	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1410e705c121SKalle Valo 	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1411e705c121SKalle Valo 	if (ret < 0) {
1412e705c121SKalle Valo 		idx = ret;
1413e705c121SKalle Valo 		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1414e705c121SKalle Valo 		goto out;
1415e705c121SKalle Valo 	}
1416e705c121SKalle Valo 
1417e705c121SKalle Valo 	/* Increment and update queue's write index */
14180cd1ad2dSMordechay Goodstein 	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1419e705c121SKalle Valo 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
1420e705c121SKalle Valo 
1421e705c121SKalle Valo 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1422e705c121SKalle Valo 
1423e705c121SKalle Valo  out:
1424e705c121SKalle Valo 	spin_unlock_bh(&txq->lock);
1425e705c121SKalle Valo  free_dup_buf:
1426e705c121SKalle Valo 	if (idx < 0)
1427e705c121SKalle Valo 		kfree(dup_buf);
1428e705c121SKalle Valo 	return idx;
1429e705c121SKalle Valo }
1430e705c121SKalle Valo 
1431e705c121SKalle Valo /*
1432e705c121SKalle Valo  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1433e705c121SKalle Valo  * @rxb: Rx buffer to reclaim
1434e705c121SKalle Valo  */
1435e705c121SKalle Valo void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1436e705c121SKalle Valo 			    struct iwl_rx_cmd_buffer *rxb)
1437e705c121SKalle Valo {
1438e705c121SKalle Valo 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1439e705c121SKalle Valo 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1440d490e097SJohannes Berg 	u8 group_id;
144139bdb17eSSharon Dvir 	u32 cmd_id;
1442e705c121SKalle Valo 	int txq_id = SEQ_TO_QUEUE(sequence);
1443e705c121SKalle Valo 	int index = SEQ_TO_INDEX(sequence);
1444e705c121SKalle Valo 	int cmd_index;
1445e705c121SKalle Valo 	struct iwl_device_cmd *cmd;
1446e705c121SKalle Valo 	struct iwl_cmd_meta *meta;
1447e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
14484f4822b7SMordechay Goodstein 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1449e705c121SKalle Valo 
1450e705c121SKalle Valo 	/* If a Tx command is being handled and it isn't in the actual
1451e705c121SKalle Valo 	 * command queue then a command routing bug has been introduced
1452e705c121SKalle Valo 	 * in the queue management code.
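	 * As an example, sequence 0x0234 decodes to txq_id 2
	 * (SEQ_TO_QUEUE) and index 0x34 (SEQ_TO_INDEX); any queue other
	 * than the command queue here means the routing broke.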
*/ 14534f4822b7SMordechay Goodstein if (WARN(txq_id != trans->txqs.cmd.q_id, 1454e705c121SKalle Valo "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 14554f4822b7SMordechay Goodstein txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, 1456b2a3b1c1SSara Sharon txq->write_ptr)) { 1457e705c121SKalle Valo iwl_print_hex_error(trans, pkt, 32); 1458e705c121SKalle Valo return; 1459e705c121SKalle Valo } 1460e705c121SKalle Valo 1461e705c121SKalle Valo spin_lock_bh(&txq->lock); 1462e705c121SKalle Valo 14630cd1ad2dSMordechay Goodstein cmd_index = iwl_txq_get_cmd_index(txq, index); 1464e705c121SKalle Valo cmd = txq->entries[cmd_index].cmd; 1465e705c121SKalle Valo meta = &txq->entries[cmd_index].meta; 1466d490e097SJohannes Berg group_id = cmd->hdr.group_id; 146739bdb17eSSharon Dvir cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); 1468e705c121SKalle Valo 14690179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); 1470e705c121SKalle Valo 1471e705c121SKalle Valo /* Input error checking is done when commands are added to queue. */ 1472e705c121SKalle Valo if (meta->flags & CMD_WANT_SKB) { 1473e705c121SKalle Valo struct page *p = rxb_steal_page(rxb); 1474e705c121SKalle Valo 1475e705c121SKalle Valo meta->source->resp_pkt = pkt; 1476e705c121SKalle Valo meta->source->_rx_page_addr = (unsigned long)page_address(p); 1477e705c121SKalle Valo meta->source->_rx_page_order = trans_pcie->rx_page_order; 1478e705c121SKalle Valo } 1479e705c121SKalle Valo 1480dcbb4746SEmmanuel Grumbach if (meta->flags & CMD_WANT_ASYNC_CALLBACK) 1481dcbb4746SEmmanuel Grumbach iwl_op_mode_async_cb(trans->op_mode, cmd); 1482dcbb4746SEmmanuel Grumbach 1483e705c121SKalle Valo iwl_pcie_cmdq_reclaim(trans, txq_id, index); 1484e705c121SKalle Valo 1485e705c121SKalle Valo if (!(meta->flags & CMD_ASYNC)) { 1486e705c121SKalle Valo if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { 1487e705c121SKalle Valo IWL_WARN(trans, 1488e705c121SKalle Valo "HCMD_ACTIVE already clear for command %s\n", 148939bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id)); 1490e705c121SKalle Valo } 1491e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1492e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 149339bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id)); 1494e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1495e705c121SKalle Valo } 1496e705c121SKalle Valo 1497e705c121SKalle Valo meta->flags = 0; 1498e705c121SKalle Valo 1499e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1500e705c121SKalle Valo } 1501e705c121SKalle Valo 1502e705c121SKalle Valo #define HOST_COMPLETE_TIMEOUT (2 * HZ) 1503e705c121SKalle Valo 1504e705c121SKalle Valo static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, 1505e705c121SKalle Valo struct iwl_host_cmd *cmd) 1506e705c121SKalle Valo { 1507e705c121SKalle Valo int ret; 1508e705c121SKalle Valo 1509e705c121SKalle Valo /* An asynchronous command can not expect an SKB to be set. 
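	 * CMD_WANT_SKB makes the completion path store resp_pkt in the
	 * caller's struct iwl_host_cmd, and an async caller's command
	 * struct (typically a stack variable) is usually gone by the
	 * time the response arrives.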
*/ 1510e705c121SKalle Valo if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1511e705c121SKalle Valo return -EINVAL; 1512e705c121SKalle Valo 1513e705c121SKalle Valo ret = iwl_pcie_enqueue_hcmd(trans, cmd); 1514e705c121SKalle Valo if (ret < 0) { 1515e705c121SKalle Valo IWL_ERR(trans, 1516e705c121SKalle Valo "Error sending %s: enqueue_hcmd failed: %d\n", 151739bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), ret); 1518e705c121SKalle Valo return ret; 1519e705c121SKalle Valo } 1520e705c121SKalle Valo return 0; 1521e705c121SKalle Valo } 1522e705c121SKalle Valo 1523e705c121SKalle Valo static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, 1524e705c121SKalle Valo struct iwl_host_cmd *cmd) 1525e705c121SKalle Valo { 1526e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 15274f4822b7SMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; 1528e705c121SKalle Valo int cmd_idx; 1529e705c121SKalle Valo int ret; 1530e705c121SKalle Valo 1531e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 153239bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1533e705c121SKalle Valo 1534e705c121SKalle Valo if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1535e705c121SKalle Valo &trans->status), 1536e705c121SKalle Valo "Command %s: a command is already active!\n", 153739bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id))) 1538e705c121SKalle Valo return -EIO; 1539e705c121SKalle Valo 1540e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 154139bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1542e705c121SKalle Valo 1543e705c121SKalle Valo cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 1544e705c121SKalle Valo if (cmd_idx < 0) { 1545e705c121SKalle Valo ret = cmd_idx; 1546e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1547e705c121SKalle Valo IWL_ERR(trans, 1548e705c121SKalle Valo "Error sending %s: enqueue_hcmd failed: %d\n", 154939bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), ret); 1550e705c121SKalle Valo return ret; 1551e705c121SKalle Valo } 1552e705c121SKalle Valo 1553e705c121SKalle Valo ret = wait_event_timeout(trans_pcie->wait_command_queue, 1554e705c121SKalle Valo !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1555e705c121SKalle Valo &trans->status), 1556e705c121SKalle Valo HOST_COMPLETE_TIMEOUT); 1557e705c121SKalle Valo if (!ret) { 1558e705c121SKalle Valo IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 155939bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 1560e705c121SKalle Valo jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1561e705c121SKalle Valo 1562e705c121SKalle Valo IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 1563bb98ecd4SSara Sharon txq->read_ptr, txq->write_ptr); 1564e705c121SKalle Valo 1565e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1566e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 156739bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1568e705c121SKalle Valo ret = -ETIMEDOUT; 1569e705c121SKalle Valo 1570d1967ce6SShahar S Matityahu iwl_trans_pcie_sync_nmi(trans); 1571e705c121SKalle Valo goto cancel; 1572e705c121SKalle Valo } 1573e705c121SKalle Valo 1574e705c121SKalle Valo if (test_bit(STATUS_FW_ERROR, &trans->status)) { 15754290eaadSJohannes Berg iwl_trans_pcie_dump_regs(trans); 1576e705c121SKalle Valo IWL_ERR(trans, "FW error in SYNC CMD %s\n", 157739bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1578e705c121SKalle Valo dump_stack(); 
1579e705c121SKalle Valo ret = -EIO; 1580e705c121SKalle Valo goto cancel; 1581e705c121SKalle Valo } 1582e705c121SKalle Valo 1583e705c121SKalle Valo if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1584326477e4SJohannes Berg test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 1585e705c121SKalle Valo IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1586e705c121SKalle Valo ret = -ERFKILL; 1587e705c121SKalle Valo goto cancel; 1588e705c121SKalle Valo } 1589e705c121SKalle Valo 1590e705c121SKalle Valo if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1591e705c121SKalle Valo IWL_ERR(trans, "Error: Response NULL in '%s'\n", 159239bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1593e705c121SKalle Valo ret = -EIO; 1594e705c121SKalle Valo goto cancel; 1595e705c121SKalle Valo } 1596e705c121SKalle Valo 1597e705c121SKalle Valo return 0; 1598e705c121SKalle Valo 1599e705c121SKalle Valo cancel: 1600e705c121SKalle Valo if (cmd->flags & CMD_WANT_SKB) { 1601e705c121SKalle Valo /* 1602e705c121SKalle Valo * Cancel the CMD_WANT_SKB flag for the cmd in the 1603e705c121SKalle Valo * TX cmd queue. Otherwise in case the cmd comes 1604e705c121SKalle Valo * in later, it will possibly set an invalid 1605e705c121SKalle Valo * address (cmd->meta.source). 1606e705c121SKalle Valo */ 1607b2a3b1c1SSara Sharon txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 1608e705c121SKalle Valo } 1609e705c121SKalle Valo 1610e705c121SKalle Valo if (cmd->resp_pkt) { 1611e705c121SKalle Valo iwl_free_resp(cmd); 1612e705c121SKalle Valo cmd->resp_pkt = NULL; 1613e705c121SKalle Valo } 1614e705c121SKalle Valo 1615e705c121SKalle Valo return ret; 1616e705c121SKalle Valo } 1617e705c121SKalle Valo 1618e705c121SKalle Valo int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1619e705c121SKalle Valo { 16202b3fae66SMatt Chen /* Make sure the NIC is still alive in the bus */ 1621f60c9e59SEmmanuel Grumbach if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 1622f60c9e59SEmmanuel Grumbach return -ENODEV; 16232b3fae66SMatt Chen 1624e705c121SKalle Valo if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1625326477e4SJohannes Berg test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 1626e705c121SKalle Valo IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 1627e705c121SKalle Valo cmd->id); 1628e705c121SKalle Valo return -ERFKILL; 1629e705c121SKalle Valo } 1630e705c121SKalle Valo 1631e705c121SKalle Valo if (cmd->flags & CMD_ASYNC) 1632e705c121SKalle Valo return iwl_pcie_send_hcmd_async(trans, cmd); 1633e705c121SKalle Valo 1634e705c121SKalle Valo /* We still can fail on RFKILL that can be asserted while we wait */ 1635e705c121SKalle Valo return iwl_pcie_send_hcmd_sync(trans, cmd); 1636e705c121SKalle Valo } 1637e705c121SKalle Valo 16383a0b2a42SEmmanuel Grumbach static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, 16393a0b2a42SEmmanuel Grumbach struct iwl_txq *txq, u8 hdr_len, 1640bb03927eSJohannes Berg struct iwl_cmd_meta *out_meta) 16413a0b2a42SEmmanuel Grumbach { 1642bb03927eSJohannes Berg u16 head_tb_len; 16433a0b2a42SEmmanuel Grumbach int i; 16443a0b2a42SEmmanuel Grumbach 16453a0b2a42SEmmanuel Grumbach /* 16463a0b2a42SEmmanuel Grumbach * Set up TFD's third entry to point directly to remainder 16473a0b2a42SEmmanuel Grumbach * of skb's head, if any 16483a0b2a42SEmmanuel Grumbach */ 1649bb03927eSJohannes Berg head_tb_len = skb_headlen(skb) - hdr_len; 16503a0b2a42SEmmanuel Grumbach 1651bb03927eSJohannes Berg if (head_tb_len > 0) { 1652bb03927eSJohannes Berg dma_addr_t tb_phys = dma_map_single(trans->dev, 
16533a0b2a42SEmmanuel Grumbach skb->data + hdr_len, 1654bb03927eSJohannes Berg head_tb_len, DMA_TO_DEVICE); 1655bb03927eSJohannes Berg if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 16563a0b2a42SEmmanuel Grumbach return -EINVAL; 16579b08ae22SJohannes Berg trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, 16589b08ae22SJohannes Berg tb_phys, head_tb_len); 1659bb03927eSJohannes Berg iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); 16603a0b2a42SEmmanuel Grumbach } 16613a0b2a42SEmmanuel Grumbach 16623a0b2a42SEmmanuel Grumbach /* set up the remaining entries to point to the data */ 16633a0b2a42SEmmanuel Grumbach for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 16643a0b2a42SEmmanuel Grumbach const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 16653a0b2a42SEmmanuel Grumbach dma_addr_t tb_phys; 16663a0b2a42SEmmanuel Grumbach int tb_idx; 16673a0b2a42SEmmanuel Grumbach 16683a0b2a42SEmmanuel Grumbach if (!skb_frag_size(frag)) 16693a0b2a42SEmmanuel Grumbach continue; 16703a0b2a42SEmmanuel Grumbach 16713a0b2a42SEmmanuel Grumbach tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 16723a0b2a42SEmmanuel Grumbach skb_frag_size(frag), DMA_TO_DEVICE); 16733a0b2a42SEmmanuel Grumbach 16747d50d76eSJohannes Berg if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 16753a0b2a42SEmmanuel Grumbach return -EINVAL; 16769b08ae22SJohannes Berg trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), 16779b08ae22SJohannes Berg tb_phys, skb_frag_size(frag)); 16783a0b2a42SEmmanuel Grumbach tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 16793a0b2a42SEmmanuel Grumbach skb_frag_size(frag), false); 16806e00a237SJohannes Berg if (tb_idx < 0) 16816e00a237SJohannes Berg return tb_idx; 16823a0b2a42SEmmanuel Grumbach 16833cd1980bSSara Sharon out_meta->tbs |= BIT(tb_idx); 16843a0b2a42SEmmanuel Grumbach } 16853a0b2a42SEmmanuel Grumbach 16863a0b2a42SEmmanuel Grumbach return 0; 16873a0b2a42SEmmanuel Grumbach } 16883a0b2a42SEmmanuel Grumbach 16896eb5e529SEmmanuel Grumbach #ifdef CONFIG_INET 16906eb5e529SEmmanuel Grumbach static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, 16916eb5e529SEmmanuel Grumbach bool ipv6, unsigned int len) 16926eb5e529SEmmanuel Grumbach { 16936eb5e529SEmmanuel Grumbach if (ipv6) { 16946eb5e529SEmmanuel Grumbach struct ipv6hdr *iphv6 = iph; 16956eb5e529SEmmanuel Grumbach 16966eb5e529SEmmanuel Grumbach tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr, 16976eb5e529SEmmanuel Grumbach len + tcph->doff * 4, 16986eb5e529SEmmanuel Grumbach IPPROTO_TCP, 0); 16996eb5e529SEmmanuel Grumbach } else { 17006eb5e529SEmmanuel Grumbach struct iphdr *iphv4 = iph; 17016eb5e529SEmmanuel Grumbach 17026eb5e529SEmmanuel Grumbach ip_send_check(iphv4); 17036eb5e529SEmmanuel Grumbach tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr, 17046eb5e529SEmmanuel Grumbach len + tcph->doff * 4, 17056eb5e529SEmmanuel Grumbach IPPROTO_TCP, 0); 17066eb5e529SEmmanuel Grumbach } 17076eb5e529SEmmanuel Grumbach } 17086eb5e529SEmmanuel Grumbach 1709066fd29aSSara Sharon static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 17106eb5e529SEmmanuel Grumbach struct iwl_txq *txq, u8 hdr_len, 17116eb5e529SEmmanuel Grumbach struct iwl_cmd_meta *out_meta, 1712a89c72ffSJohannes Berg struct iwl_device_tx_cmd *dev_cmd, 1713a89c72ffSJohannes Berg u16 tb1_len) 17146eb5e529SEmmanuel Grumbach { 171505e5a7e5SJohannes Berg struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 1716fcac7002SMordechay Goodstein struct iwl_trans_pcie *trans_pcie = 
1717fcac7002SMordechay Goodstein IWL_TRANS_GET_PCIE_TRANS(txq->trans); 17186eb5e529SEmmanuel Grumbach struct ieee80211_hdr *hdr = (void *)skb->data; 17196eb5e529SEmmanuel Grumbach unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 17206eb5e529SEmmanuel Grumbach unsigned int mss = skb_shinfo(skb)->gso_size; 17216eb5e529SEmmanuel Grumbach u16 length, iv_len, amsdu_pad; 17226eb5e529SEmmanuel Grumbach u8 *start_hdr; 17236eb5e529SEmmanuel Grumbach struct iwl_tso_hdr_page *hdr_page; 17246eb5e529SEmmanuel Grumbach struct tso_t tso; 17256eb5e529SEmmanuel Grumbach 17266eb5e529SEmmanuel Grumbach /* if the packet is protected, then it must be CCMP or GCMP */ 17276eb5e529SEmmanuel Grumbach BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); 17286eb5e529SEmmanuel Grumbach iv_len = ieee80211_has_protected(hdr->frame_control) ? 17296eb5e529SEmmanuel Grumbach IEEE80211_CCMP_HDR_LEN : 0; 17306eb5e529SEmmanuel Grumbach 17316eb5e529SEmmanuel Grumbach trace_iwlwifi_dev_tx(trans->dev, skb, 17320cd1ad2dSMordechay Goodstein iwl_txq_get_tfd(trans, txq, txq->write_ptr), 1733885375d0SMordechay Goodstein trans->txqs.tfd.size, 17348790fce4SJohannes Berg &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 17356eb5e529SEmmanuel Grumbach 17366eb5e529SEmmanuel Grumbach ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 17376eb5e529SEmmanuel Grumbach snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 17386eb5e529SEmmanuel Grumbach total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; 17396eb5e529SEmmanuel Grumbach amsdu_pad = 0; 17406eb5e529SEmmanuel Grumbach 17416eb5e529SEmmanuel Grumbach /* total amount of header we may need for this A-MSDU */ 17426eb5e529SEmmanuel Grumbach hdr_room = DIV_ROUND_UP(total_len, mss) * 17436eb5e529SEmmanuel Grumbach (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; 17446eb5e529SEmmanuel Grumbach 17456eb5e529SEmmanuel Grumbach /* Our device supports 9 segments at most, it will fit in 1 page */ 17467b02bf61SJohannes Berg hdr_page = get_page_hdr(trans, hdr_room, skb); 17476eb5e529SEmmanuel Grumbach if (!hdr_page) 17486eb5e529SEmmanuel Grumbach return -ENOMEM; 17496eb5e529SEmmanuel Grumbach 17506eb5e529SEmmanuel Grumbach start_hdr = hdr_page->pos; 17516eb5e529SEmmanuel Grumbach memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); 17526eb5e529SEmmanuel Grumbach hdr_page->pos += iv_len; 17536eb5e529SEmmanuel Grumbach 17546eb5e529SEmmanuel Grumbach /* 17556eb5e529SEmmanuel Grumbach * Pull the ieee80211 header + IV to be able to use TSO core, 17566eb5e529SEmmanuel Grumbach * we will restore it for the tx_status flow. 17576eb5e529SEmmanuel Grumbach */ 17586eb5e529SEmmanuel Grumbach skb_pull(skb, hdr_len + iv_len); 17596eb5e529SEmmanuel Grumbach 176005e5a7e5SJohannes Berg /* 176105e5a7e5SJohannes Berg * Remove the length of all the headers that we don't actually 176205e5a7e5SJohannes Berg * have in the MPDU by themselves, but that we duplicate into 176305e5a7e5SJohannes Berg * all the different MSDUs inside the A-MSDU. 
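	 * For a typical IPv4/TCP frame without options, for instance,
	 * snap_ip_tcp_hdrlen is 8 (SNAP) + 20 (IP) + 20 (TCP) = 48 bytes,
	 * subtracted once here and added back per subframe below.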
176405e5a7e5SJohannes Berg */ 176505e5a7e5SJohannes Berg le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 176605e5a7e5SJohannes Berg 17676eb5e529SEmmanuel Grumbach tso_start(skb, &tso); 17686eb5e529SEmmanuel Grumbach 17696eb5e529SEmmanuel Grumbach while (total_len) { 17706eb5e529SEmmanuel Grumbach /* this is the data left for this subframe */ 17716eb5e529SEmmanuel Grumbach unsigned int data_left = 17726eb5e529SEmmanuel Grumbach min_t(unsigned int, mss, total_len); 17736eb5e529SEmmanuel Grumbach struct sk_buff *csum_skb = NULL; 17746eb5e529SEmmanuel Grumbach unsigned int hdr_tb_len; 17756eb5e529SEmmanuel Grumbach dma_addr_t hdr_tb_phys; 17766eb5e529SEmmanuel Grumbach struct tcphdr *tcph; 177705e5a7e5SJohannes Berg u8 *iph, *subf_hdrs_start = hdr_page->pos; 17786eb5e529SEmmanuel Grumbach 17796eb5e529SEmmanuel Grumbach total_len -= data_left; 17806eb5e529SEmmanuel Grumbach 17816eb5e529SEmmanuel Grumbach memset(hdr_page->pos, 0, amsdu_pad); 17826eb5e529SEmmanuel Grumbach hdr_page->pos += amsdu_pad; 17836eb5e529SEmmanuel Grumbach amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 17846eb5e529SEmmanuel Grumbach data_left)) & 0x3; 17856eb5e529SEmmanuel Grumbach ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 17866eb5e529SEmmanuel Grumbach hdr_page->pos += ETH_ALEN; 17876eb5e529SEmmanuel Grumbach ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 17886eb5e529SEmmanuel Grumbach hdr_page->pos += ETH_ALEN; 17896eb5e529SEmmanuel Grumbach 17906eb5e529SEmmanuel Grumbach length = snap_ip_tcp_hdrlen + data_left; 17916eb5e529SEmmanuel Grumbach *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 17926eb5e529SEmmanuel Grumbach hdr_page->pos += sizeof(length); 17936eb5e529SEmmanuel Grumbach 17946eb5e529SEmmanuel Grumbach /* 17956eb5e529SEmmanuel Grumbach * This will copy the SNAP as well which will be considered 17966eb5e529SEmmanuel Grumbach * as MAC header. 
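		 * Each subframe header built here therefore looks like
		 *	[pad 0-3][DA 6][SA 6][len 2][SNAP 8][IP hdr][TCP hdr]
		 * where tso_build_hdr() copies SNAP/IP/TCP from the
		 * original skb and fixes up the IP/TCP fields for this
		 * segment.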
17976eb5e529SEmmanuel Grumbach */ 17986eb5e529SEmmanuel Grumbach tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 17996eb5e529SEmmanuel Grumbach iph = hdr_page->pos + 8; 18006eb5e529SEmmanuel Grumbach tcph = (void *)(iph + ip_hdrlen); 18016eb5e529SEmmanuel Grumbach 18026eb5e529SEmmanuel Grumbach /* For testing on current hardware only */ 18036eb5e529SEmmanuel Grumbach if (trans_pcie->sw_csum_tx) { 18046eb5e529SEmmanuel Grumbach csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), 18056eb5e529SEmmanuel Grumbach GFP_ATOMIC); 18067d50d76eSJohannes Berg if (!csum_skb) 18077d50d76eSJohannes Berg return -ENOMEM; 18086eb5e529SEmmanuel Grumbach 18096eb5e529SEmmanuel Grumbach iwl_compute_pseudo_hdr_csum(iph, tcph, 18106eb5e529SEmmanuel Grumbach skb->protocol == 18116eb5e529SEmmanuel Grumbach htons(ETH_P_IPV6), 18126eb5e529SEmmanuel Grumbach data_left); 18136eb5e529SEmmanuel Grumbach 181459ae1d12SJohannes Berg skb_put_data(csum_skb, tcph, tcp_hdrlen(skb)); 1815a52a8a4dSZhang Shengju skb_reset_transport_header(csum_skb); 18166eb5e529SEmmanuel Grumbach csum_skb->csum_start = 18176eb5e529SEmmanuel Grumbach (unsigned char *)tcp_hdr(csum_skb) - 18186eb5e529SEmmanuel Grumbach csum_skb->head; 18196eb5e529SEmmanuel Grumbach } 18206eb5e529SEmmanuel Grumbach 18216eb5e529SEmmanuel Grumbach hdr_page->pos += snap_ip_tcp_hdrlen; 18226eb5e529SEmmanuel Grumbach 18236eb5e529SEmmanuel Grumbach hdr_tb_len = hdr_page->pos - start_hdr; 18246eb5e529SEmmanuel Grumbach hdr_tb_phys = dma_map_single(trans->dev, start_hdr, 18256eb5e529SEmmanuel Grumbach hdr_tb_len, DMA_TO_DEVICE); 18266eb5e529SEmmanuel Grumbach if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { 18276eb5e529SEmmanuel Grumbach dev_kfree_skb(csum_skb); 18287d50d76eSJohannes Berg return -EINVAL; 18296eb5e529SEmmanuel Grumbach } 18306eb5e529SEmmanuel Grumbach iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 18316eb5e529SEmmanuel Grumbach hdr_tb_len, false); 1832bf77ee2eSSara Sharon trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 18339b08ae22SJohannes Berg hdr_tb_phys, hdr_tb_len); 183405e5a7e5SJohannes Berg /* add this subframe's headers' length to the tx_cmd */ 183505e5a7e5SJohannes Berg le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 18366eb5e529SEmmanuel Grumbach 18376eb5e529SEmmanuel Grumbach /* prepare the start_hdr for the next subframe */ 18386eb5e529SEmmanuel Grumbach start_hdr = hdr_page->pos; 18396eb5e529SEmmanuel Grumbach 18406eb5e529SEmmanuel Grumbach /* put the payload */ 18416eb5e529SEmmanuel Grumbach while (data_left) { 18426eb5e529SEmmanuel Grumbach unsigned int size = min_t(unsigned int, tso.size, 18436eb5e529SEmmanuel Grumbach data_left); 18446eb5e529SEmmanuel Grumbach dma_addr_t tb_phys; 18456eb5e529SEmmanuel Grumbach 18466eb5e529SEmmanuel Grumbach if (trans_pcie->sw_csum_tx) 184759ae1d12SJohannes Berg skb_put_data(csum_skb, tso.data, size); 18486eb5e529SEmmanuel Grumbach 18496eb5e529SEmmanuel Grumbach tb_phys = dma_map_single(trans->dev, tso.data, 18506eb5e529SEmmanuel Grumbach size, DMA_TO_DEVICE); 18516eb5e529SEmmanuel Grumbach if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 18526eb5e529SEmmanuel Grumbach dev_kfree_skb(csum_skb); 18537d50d76eSJohannes Berg return -EINVAL; 18546eb5e529SEmmanuel Grumbach } 18556eb5e529SEmmanuel Grumbach 18566eb5e529SEmmanuel Grumbach iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 18576eb5e529SEmmanuel Grumbach size, false); 1858bf77ee2eSSara Sharon trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, 18599b08ae22SJohannes Berg tb_phys, size); 18606eb5e529SEmmanuel 
Grumbach 
18616eb5e529SEmmanuel Grumbach 			data_left -= size;
18626eb5e529SEmmanuel Grumbach 			tso_build_data(skb, &tso, size);
18636eb5e529SEmmanuel Grumbach 		}
18646eb5e529SEmmanuel Grumbach 
18656eb5e529SEmmanuel Grumbach 		/* For testing on early hardware only */
18666eb5e529SEmmanuel Grumbach 		if (trans_pcie->sw_csum_tx) {
18676eb5e529SEmmanuel Grumbach 			__wsum csum;
18686eb5e529SEmmanuel Grumbach 
18696eb5e529SEmmanuel Grumbach 			csum = skb_checksum(csum_skb,
18706eb5e529SEmmanuel Grumbach 					    skb_checksum_start_offset(csum_skb),
18716eb5e529SEmmanuel Grumbach 					    csum_skb->len -
18726eb5e529SEmmanuel Grumbach 					    skb_checksum_start_offset(csum_skb),
18736eb5e529SEmmanuel Grumbach 					    0);
18746eb5e529SEmmanuel Grumbach 			dev_kfree_skb(csum_skb);
18756eb5e529SEmmanuel Grumbach 			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
18766eb5e529SEmmanuel Grumbach 						hdr_tb_len, DMA_TO_DEVICE);
18776eb5e529SEmmanuel Grumbach 			tcph->check = csum_fold(csum);
18786eb5e529SEmmanuel Grumbach 			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
18796eb5e529SEmmanuel Grumbach 						   hdr_tb_len, DMA_TO_DEVICE);
18806eb5e529SEmmanuel Grumbach 		}
18816eb5e529SEmmanuel Grumbach 	}
18826eb5e529SEmmanuel Grumbach 
18836eb5e529SEmmanuel Grumbach 	/* re-add the WiFi header and IV */
18846eb5e529SEmmanuel Grumbach 	skb_push(skb, hdr_len + iv_len);
18856eb5e529SEmmanuel Grumbach 
18866eb5e529SEmmanuel Grumbach 	return 0;
18876eb5e529SEmmanuel Grumbach }
18886eb5e529SEmmanuel Grumbach #else /* CONFIG_INET */
18896eb5e529SEmmanuel Grumbach static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
18906eb5e529SEmmanuel Grumbach 				   struct iwl_txq *txq, u8 hdr_len,
18916eb5e529SEmmanuel Grumbach 				   struct iwl_cmd_meta *out_meta,
1892a89c72ffSJohannes Berg 				   struct iwl_device_tx_cmd *dev_cmd,
1893a89c72ffSJohannes Berg 				   u16 tb1_len)
18946eb5e529SEmmanuel Grumbach {
18956eb5e529SEmmanuel Grumbach 	/* No A-MSDU without CONFIG_INET */
18966eb5e529SEmmanuel Grumbach 	WARN_ON(1);
18976eb5e529SEmmanuel Grumbach 
18986eb5e529SEmmanuel Grumbach 	return -1;
18996eb5e529SEmmanuel Grumbach }
19006eb5e529SEmmanuel Grumbach #endif /* CONFIG_INET */
19016eb5e529SEmmanuel Grumbach 
1902e705c121SKalle Valo int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1903a89c72ffSJohannes Berg 		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
1904e705c121SKalle Valo {
1905e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1906e705c121SKalle Valo 	struct ieee80211_hdr *hdr;
1907e705c121SKalle Valo 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
1908e705c121SKalle Valo 	struct iwl_cmd_meta *out_meta;
1909e705c121SKalle Valo 	struct iwl_txq *txq;
1910e705c121SKalle Valo 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1911e705c121SKalle Valo 	void *tb1_addr;
19124fe10bc6SSara Sharon 	void *tfd;
19133a0b2a42SEmmanuel Grumbach 	u16 len, tb1_len;
1914e705c121SKalle Valo 	bool wait_write_ptr;
1915e705c121SKalle Valo 	__le16 fc;
1916e705c121SKalle Valo 	u8 hdr_len;
1917e705c121SKalle Valo 	u16 wifi_seq;
1918c772a3d3SSara Sharon 	bool amsdu;
1919e705c121SKalle Valo 
19204f4822b7SMordechay Goodstein 	txq = trans->txqs.txq[txq_id];
1921e705c121SKalle Valo 
19224f4822b7SMordechay Goodstein 	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
1923e705c121SKalle Valo 		      "TX on unused queue %d\n", txq_id))
1924e705c121SKalle Valo 		return -EINVAL;
1925e705c121SKalle Valo 
192641837ca9SEmmanuel Grumbach 	if (unlikely(trans_pcie->sw_csum_tx &&
192741837ca9SEmmanuel Grumbach 		     skb->ip_summed == CHECKSUM_PARTIAL)) {
192841837ca9SEmmanuel Grumbach 		int offs = skb_checksum_start_offset(skb);
192941837ca9SEmmanuel Grumbach 		int csum_offs = offs + skb->csum_offset;
193041837ca9SEmmanuel Grumbach 		__wsum csum;
193141837ca9SEmmanuel Grumbach 
193241837ca9SEmmanuel Grumbach 		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
193341837ca9SEmmanuel Grumbach 			return -1;
193441837ca9SEmmanuel Grumbach 
193541837ca9SEmmanuel Grumbach 		csum = skb_checksum(skb, offs, skb->len - offs, 0);
193641837ca9SEmmanuel Grumbach 		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
19373955525dSEmmanuel Grumbach 
19383955525dSEmmanuel Grumbach 		skb->ip_summed = CHECKSUM_UNNECESSARY;
193941837ca9SEmmanuel Grumbach 	}
194041837ca9SEmmanuel Grumbach 
1941e705c121SKalle Valo 	if (skb_is_nonlinear(skb) &&
1942885375d0SMordechay Goodstein 	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
1943e705c121SKalle Valo 	    __skb_linearize(skb))
1944e705c121SKalle Valo 		return -ENOMEM;
1945e705c121SKalle Valo 
1946e705c121SKalle Valo 	/* mac80211 always puts the full header into the SKB's head,
1947e705c121SKalle Valo 	 * so there's no need to check if it's readable there
1948e705c121SKalle Valo 	 */
1949e705c121SKalle Valo 	hdr = (struct ieee80211_hdr *)skb->data;
1950e705c121SKalle Valo 	fc = hdr->frame_control;
1951e705c121SKalle Valo 	hdr_len = ieee80211_hdrlen(fc);
1952e705c121SKalle Valo 
1953e705c121SKalle Valo 	spin_lock(&txq->lock);
1954e705c121SKalle Valo 
19550cd1ad2dSMordechay Goodstein 	if (iwl_txq_space(trans, txq) < txq->high_mark) {
19560cd1ad2dSMordechay Goodstein 		iwl_txq_stop(trans, txq);
19573955525dSEmmanuel Grumbach 
19583955525dSEmmanuel Grumbach 		/* don't put the packet on the ring, if there is no room */
19590cd1ad2dSMordechay Goodstein 		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
1960a89c72ffSJohannes Berg 			struct iwl_device_tx_cmd **dev_cmd_ptr;
19613955525dSEmmanuel Grumbach 
196221cb3222SJohannes Berg 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
196322852fadSMordechay Goodstein 					       trans->txqs.dev_cmd_offs);
196421cb3222SJohannes Berg 
196521cb3222SJohannes Berg 			*dev_cmd_ptr = dev_cmd;
19663955525dSEmmanuel Grumbach 			__skb_queue_tail(&txq->overflow_q, skb);
19673955525dSEmmanuel Grumbach 
19683955525dSEmmanuel Grumbach 			spin_unlock(&txq->lock);
19693955525dSEmmanuel Grumbach 			return 0;
19703955525dSEmmanuel Grumbach 		}
19713955525dSEmmanuel Grumbach 	}
19723955525dSEmmanuel Grumbach 
1973e705c121SKalle Valo 	/* In AGG mode, the index in the ring must correspond to the WiFi
1974e705c121SKalle Valo 	 * sequence number. This is a HW requirement to help the SCD to parse
1975e705c121SKalle Valo 	 * the BA.
1976e705c121SKalle Valo 	 * Check here that the packets are in the right place on the ring.
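	 * For instance, a frame with seq_ctrl 0x0450 carries WiFi
	 * sequence number 0x45 and must therefore sit at ring entry
	 * 0x45 of an A-MPDU queue.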
1977e705c121SKalle Valo */ 1978e705c121SKalle Valo wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1979e705c121SKalle Valo WARN_ONCE(txq->ampdu && 1980bb98ecd4SSara Sharon (wifi_seq & 0xff) != txq->write_ptr, 1981e705c121SKalle Valo "Q: %d WiFi Seq %d tfdNum %d", 1982bb98ecd4SSara Sharon txq_id, wifi_seq, txq->write_ptr); 1983e705c121SKalle Valo 1984e705c121SKalle Valo /* Set up driver data for this TFD */ 1985bb98ecd4SSara Sharon txq->entries[txq->write_ptr].skb = skb; 1986bb98ecd4SSara Sharon txq->entries[txq->write_ptr].cmd = dev_cmd; 1987e705c121SKalle Valo 1988e705c121SKalle Valo dev_cmd->hdr.sequence = 1989e705c121SKalle Valo cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1990bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr))); 1991e705c121SKalle Valo 19920cd1ad2dSMordechay Goodstein tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); 1993e705c121SKalle Valo scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + 1994e705c121SKalle Valo offsetof(struct iwl_tx_cmd, scratch); 1995e705c121SKalle Valo 1996e705c121SKalle Valo tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1997e705c121SKalle Valo tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 1998e705c121SKalle Valo 1999e705c121SKalle Valo /* Set up first empty entry in queue's array of Tx/cmd buffers */ 2000bb98ecd4SSara Sharon out_meta = &txq->entries[txq->write_ptr].meta; 2001e705c121SKalle Valo out_meta->flags = 0; 2002e705c121SKalle Valo 2003e705c121SKalle Valo /* 2004e705c121SKalle Valo * The second TB (tb1) points to the remainder of the TX command 2005e705c121SKalle Valo * and the 802.11 header - dword aligned size 2006e705c121SKalle Valo * (This calculation modifies the TX command, so do it before the 2007e705c121SKalle Valo * setup of the first TB) 2008e705c121SKalle Valo */ 2009e705c121SKalle Valo len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + 20108de437c7SSara Sharon hdr_len - IWL_FIRST_TB_SIZE; 2011c772a3d3SSara Sharon /* do not align A-MSDU to dword as the subframe header aligns it */ 2012c772a3d3SSara Sharon amsdu = ieee80211_is_data_qos(fc) && 2013c772a3d3SSara Sharon (*ieee80211_get_qos_ctl(hdr) & 2014c772a3d3SSara Sharon IEEE80211_QOS_CTL_A_MSDU_PRESENT); 2015c772a3d3SSara Sharon if (trans_pcie->sw_csum_tx || !amsdu) { 2016e705c121SKalle Valo tb1_len = ALIGN(len, 4); 2017e705c121SKalle Valo /* Tell NIC about any 2-byte padding after MAC header */ 2018e705c121SKalle Valo if (tb1_len != len) 2019d172a5efSJohannes Berg tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); 2020c772a3d3SSara Sharon } else { 2021c772a3d3SSara Sharon tb1_len = len; 2022c772a3d3SSara Sharon } 2023e705c121SKalle Valo 202405e5a7e5SJohannes Berg /* 202505e5a7e5SJohannes Berg * The first TB points to bi-directional DMA data, we'll 202605e5a7e5SJohannes Berg * memcpy the data into it later. 
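	 * The resulting TFD of a typical data frame then looks like:
	 *
	 *	TB0: the first 20 bytes of dev_cmd, copied below once
	 *	     tx_cmd->len is final,
	 *	TB1: the rest of the TX command plus the 802.11 header,
	 *	TB2+: the remainder of the skb head and its page
	 *	     fragments (or the rebuilt A-MSDU subframes).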
202705e5a7e5SJohannes Berg */ 2028e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 20298de437c7SSara Sharon IWL_FIRST_TB_SIZE, true); 2030e705c121SKalle Valo 2031e705c121SKalle Valo /* there must be data left over for TB1 or this code must be changed */ 20328de437c7SSara Sharon BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); 2033e705c121SKalle Valo 2034e705c121SKalle Valo /* map the data for TB1 */ 20358de437c7SSara Sharon tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 2036e705c121SKalle Valo tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 2037e705c121SKalle Valo if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 2038e705c121SKalle Valo goto out_err; 2039e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 2040e705c121SKalle Valo 2041bf77ee2eSSara Sharon trace_iwlwifi_dev_tx(trans->dev, skb, 20420cd1ad2dSMordechay Goodstein iwl_txq_get_tfd(trans, txq, txq->write_ptr), 2043885375d0SMordechay Goodstein trans->txqs.tfd.size, 2044bf77ee2eSSara Sharon &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2045bf77ee2eSSara Sharon hdr_len); 2046bf77ee2eSSara Sharon 2047bf1ad897SEliad Peller /* 2048bf1ad897SEliad Peller * If gso_size wasn't set, don't give the frame "amsdu treatment" 2049bf1ad897SEliad Peller * (adding subframes, etc.). 2050bf1ad897SEliad Peller * This can happen in some testing flows when the amsdu was already 2051bf1ad897SEliad Peller * pre-built, and we just need to send the resulting skb. 2052bf1ad897SEliad Peller */ 2053bf1ad897SEliad Peller if (amsdu && skb_shinfo(skb)->gso_size) { 20546eb5e529SEmmanuel Grumbach if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, 20556eb5e529SEmmanuel Grumbach out_meta, dev_cmd, 20566eb5e529SEmmanuel Grumbach tb1_len))) 2057e705c121SKalle Valo goto out_err; 2058bb03927eSJohannes Berg } else { 20590044f171SJohannes Berg struct sk_buff *frag; 20600044f171SJohannes Berg 2061bb03927eSJohannes Berg if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, 2062bb03927eSJohannes Berg out_meta))) 20636eb5e529SEmmanuel Grumbach goto out_err; 2064bb03927eSJohannes Berg 20650044f171SJohannes Berg skb_walk_frags(skb, frag) { 20660044f171SJohannes Berg if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, 20670044f171SJohannes Berg out_meta))) 20680044f171SJohannes Berg goto out_err; 20690044f171SJohannes Berg } 20706eb5e529SEmmanuel Grumbach } 2071e705c121SKalle Valo 207205e5a7e5SJohannes Berg /* building the A-MSDU might have changed this data, so memcpy it now */ 2073c1f33442SLiad Kaufman memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); 207405e5a7e5SJohannes Berg 20750cd1ad2dSMordechay Goodstein tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); 2076e705c121SKalle Valo /* Set up entry for this TFD in Tx byte-count array */ 20770179bfffSMordechay Goodstein iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 20780179bfffSMordechay Goodstein iwl_txq_gen1_tfd_get_num_tbs(trans, 20790179bfffSMordechay Goodstein tfd)); 2080e705c121SKalle Valo 2081e705c121SKalle Valo wait_write_ptr = ieee80211_has_morefrags(fc); 2082e705c121SKalle Valo 2083e705c121SKalle Valo /* start timer if queue currently empty */ 20840d52497aSEmmanuel Grumbach if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { 2085e705c121SKalle Valo /* 2086e705c121SKalle Valo * If the TXQ is active, then set the timer, if not, 2087e705c121SKalle Valo * set the timer in remainder so that the timer will 2088e705c121SKalle Valo * be armed with the right value when 
the station will 2089e705c121SKalle Valo * wake up. 2090e705c121SKalle Valo */ 2091e705c121SKalle Valo if (!txq->frozen) 2092e705c121SKalle Valo mod_timer(&txq->stuck_timer, 2093e705c121SKalle Valo jiffies + txq->wd_timeout); 2094e705c121SKalle Valo else 2095e705c121SKalle Valo txq->frozen_expiry_remainder = txq->wd_timeout; 2096e705c121SKalle Valo } 2097e705c121SKalle Valo 2098e705c121SKalle Valo /* Tell device the write index *just past* this latest filled TFD */ 20990cd1ad2dSMordechay Goodstein txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); 2100e705c121SKalle Valo if (!wait_write_ptr) 2101e705c121SKalle Valo iwl_pcie_txq_inc_wr_ptr(trans, txq); 2102e705c121SKalle Valo 2103e705c121SKalle Valo /* 2104e705c121SKalle Valo * At this point the frame is "transmitted" successfully 2105e705c121SKalle Valo * and we will get a TX status notification eventually. 2106e705c121SKalle Valo */ 2107e705c121SKalle Valo spin_unlock(&txq->lock); 2108e705c121SKalle Valo return 0; 2109e705c121SKalle Valo out_err: 21100179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); 2111e705c121SKalle Valo spin_unlock(&txq->lock); 2112e705c121SKalle Valo return -1; 2113e705c121SKalle Valo } 2114
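/*
 * A rough sketch of the caller contract implemented above: 0 means the
 * frame was placed on the ring (or parked on overflow_q while the
 * queue drains), -EINVAL flags TX on an unused queue, -ENOMEM a failed
 * linearize, and -1 either a software-checksum failure or a DMA
 * mapping error (in which case the partially built TFD is unmapped
 * again); the skb itself is never freed here, that is left to the
 * caller and the TX status path.
 */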