/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

static int iwl_queue_space(const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
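
/*
 * Worked example for the arithmetic above (illustrative only; assumes
 * TFD_QUEUE_SIZE_MAX == 256, its usual value for this hardware generation):
 *
 *	write_ptr = 5, read_ptr = 250, n_window = 256
 *	used  = (5 - 250) & 255 = 11	(the mask makes the wrap-around safe)
 *	max   = 256 - 1 = 255
 *	space = 255 - 11 = 244
 *
 * A minimal sketch of the same ring-buffer arithmetic in isolation:
 *
 *	unsigned int used  = (w - r) & (size - 1);	(size must be 2^n)
 *	unsigned int space = (size - 1) - used;
 */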

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
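
/*
 * Illustrative watermark values (the slot counts are assumptions, taken from
 * the TFD_TX_CMD_SLOTS/TFD_CMD_SLOTS constants used by iwl_pcie_tx_alloc()
 * below): with slots_num == 256 this yields low_mark == 64 and
 * high_mark == 32; with a 32-slot command queue it yields low_mark == 8 and
 * high_mark == 4, i.e. both marks stay clamped above their minimums even for
 * small windows.
 */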

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt,
					     int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	if (trans->cfg->use_tfh) {
		u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
				     num_tbs * sizeof(struct iwl_tfh_tb);
		/*
		 * filled_tfd_size contains the number of filled bytes in the
		 * TFD.
		 * Dividing it by 64 will give the number of chunks to fetch
		 * to SRAM - 0 for one chunk, 1 for 2 and so on.
		 * If, for example, the TFD contains only 3 TBs then 32 bytes
		 * of the TFD are used, and only one chunk of 64 bytes should
		 * be fetched.
		 */
		u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	} else {
		u8 sta_id = tx_cmd->sta_id;

		bc_ent = cpu_to_le16(len | (sta_id << 12));
	}

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
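
/*
 * Worked example for the byte-count entry (illustrative; the 8-byte
 * offsetof/sizeof figures are assumptions consistent with the
 * "3 TBs == 32 bytes" comment above):
 *
 *	byte_cnt = 100, CCM encryption, !bc_table_dword:
 *	len = 100 + IWL_TX_CRC_SIZE(4) + IWL_TX_DELIMITER_SIZE(4)
 *	      + IEEE80211_CCMP_MIC_LEN(8) = 116
 *
 *	TFH path, 3 TBs: filled_tfd_size = 8 + 3 * 8 = 32,
 *	num_fetch_chunks = DIV_ROUND_UP(32, 64) - 1 = 0 (one 64-byte chunk),
 *	bc_ent = cpu_to_le16(116 | (0 << 12))
 *
 * The duplicate write at TFD_QUEUE_SIZE_MAX + write_ptr mirrors the first
 * TFD_QUEUE_SIZE_BC_DUP entries at the end of the table, which lets the
 * scheduler read past the wrap point without extra bounds logic (the usual
 * rationale for such a mirror).
 */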

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}
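
/*
 * The HBUS_TARG_WRPTR encoding above packs the queue id into bits 8+ and the
 * write index into the low byte. Illustrative example (invented values):
 * queue 4 with write_ptr 0x2a is written as 0x2a | (4 << 8) == 0x042a.
 */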

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
				     struct iwl_txq *txq, int idx)
{
	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
						  void *_tfd, u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
		dma_addr_t addr = get_unaligned_le32(&tb->lo);
		dma_addr_t hi_len;

		if (sizeof(dma_addr_t) <= sizeof(u32))
			return addr;

		hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

		/*
		 * shift by 16 twice to avoid warnings on 32-bit
		 * (where this code never runs anyway due to the
		 * if statement above)
		 */
		return addr | ((hi_len << 16) << 16);
	}
}
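
/*
 * Legacy TFDs split a 36-bit DMA address across tb->lo (low 32 bits) and the
 * low nibble of tb->hi_n_len; the upper 12 bits of hi_n_len carry the length.
 * Illustrative example (invented values): addr = 0x3_8000_0000, len = 40:
 *
 *	tb->lo       = 0x80000000
 *	tb->hi_n_len = (40 << 4) | 0x3 = 0x283
 *
 * which iwl_pcie_tfd_tb_get_addr() reassembles as 0x80000000 | (0x3 << 32).
 */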

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];

		put_unaligned_le64(addr, &tb->addr);
		tb->tb_len = cpu_to_le16(len);

		tfd_fh->num_tbs = cpu_to_le16(idx + 1);
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;
		struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

		u16 hi_n_len = len << 4;

		put_unaligned_le32(addr, &tb->lo);
		hi_n_len |= iwl_get_dma_hi_addr(addr);

		tb->hi_n_len = cpu_to_le16(hi_n_len);

		tfd_fh->num_tbs = idx + 1;
	}
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;

		return le16_to_cpu(tfd->num_tbs) & 0x1f;
	} else {
		struct iwl_tfd *tfd = _tfd;

		return tfd->num_tbs & 0x1f;
	}
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_txq *txq, int index)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;
	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
								  i),
					 iwl_pcie_tfd_tb_get_len(trans, tfd,
								 i),
					 DMA_TO_DEVICE);
	}

	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans_pcie->tfd_size);

	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}
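
/*
 * A minimal sketch of how a caller chains TBs into one TFD (illustrative
 * only; the real Tx path builds the command/header TB first and handles
 * per-fragment mapping errors):
 *
 *	dma_addr_t phys = dma_map_single(trans->dev, data, len,
 *					 DMA_TO_DEVICE);
 *	if (unlikely(dma_mapping_error(trans->dev, phys)))
 *		return -ENOMEM;
 *	iwl_pcie_txq_build_tfd(trans, txq, phys, len, true);	(first TB: reset)
 *	iwl_pcie_txq_build_tfd(trans, txq, phys2, len2, false);	(append)
 */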

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	txq->id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (txq_id == trans_pcie->cmd_queue) {
		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}
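
/*
 * The BUILD_BUG_ON above uses the usual power-of-two trick: x is a power of
 * two iff (x & (x - 1)) == 0. Assuming the wrap helpers in internal.h mask
 * with TFD_QUEUE_SIZE_MAX - 1, e.g.
 *
 *	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
 *
 * incrementing index 255 in a 256-entry ring correctly yields 0, but only
 * because the ring size is a power of two.
 */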

static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
				   struct sk_buff *skb)
{
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);

	if (*page_ptr) {
		__free_page(*page_ptr);
		*page_ptr = NULL;
	}
}
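
/*
 * The page pointer freed above is stashed in skb->cb by the TSO Tx path,
 * presumably along these lines (a sketch, not the actual producer code):
 *
 *	struct page **page_ptr =
 *		(void *)((u8 *)skb->cb + trans_pcie->page_offs);
 *	*page_ptr = alloc_page(GFP_ATOMIC);
 *
 * so the free side only needs the same offset into the control buffer.
 */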

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_unref(trans);
	}

	if (!trans->cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else {
				iwl_pcie_clear_cmd_in_flight(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo,
				trans_pcie->cmd_q_wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		if (trans->cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}
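
/*
 * The shifts used when programming these registers appear to encode
 * alignment requirements: writing dma_addr >> 8 assumes the TFD ring base is
 * 256-byte aligned, and kw.dma >> 4 assumes 16-byte alignment, so only the
 * significant address bits are stored. Illustrative example (invented
 * address): a ring at 0x12345600 is programmed as 0x123456.
 */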

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell the NIC where to find the circular buffer of TFDs for
		 * a given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans_pcie->txq[txq_id].dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		/* TODO: change this when moving to new TX alloc model */
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* TODO: remove this when moving to new TX alloc model */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	int last_to_free;
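
	/*
	 * Note the masking above: the caller passes a sequence-derived ssn
	 * that can exceed the ring size, while ring indexes only span
	 * TFD_QUEUE_SIZE_MAX. Illustrative example (invented numbers,
	 * assuming a 256-entry ring): ssn = 260 maps to
	 * tfd_num = 260 & 255 = 4.
	 */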

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans_pcie->queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     txq->read_ptr != tfd_num;
	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[txq->read_ptr].skb = NULL;

		if (!trans->cfg->use_tfh)
			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);
12183955525dSEmmanuel Grumbach 			 */
121921cb3222SJohannes Berg 			iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
12203955525dSEmmanuel Grumbach 		}
12213955525dSEmmanuel Grumbach 		spin_lock_bh(&txq->lock);
12223955525dSEmmanuel Grumbach 
1223bb98ecd4SSara Sharon 		if (iwl_queue_space(txq) > txq->low_mark)
1224e705c121SKalle Valo 			iwl_wake_queue(trans, txq);
12253955525dSEmmanuel Grumbach 	}
1226e705c121SKalle Valo 
1227bb98ecd4SSara Sharon 	if (txq->read_ptr == txq->write_ptr) {
1228bb98ecd4SSara Sharon 		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
1229c24c7f58SLuca Coelho 		iwl_trans_unref(trans);
1230e705c121SKalle Valo 	}
1231e705c121SKalle Valo 
1232e705c121SKalle Valo out:
1233e705c121SKalle Valo 	spin_unlock_bh(&txq->lock);
1234e705c121SKalle Valo }
1235e705c121SKalle Valo 
1236e705c121SKalle Valo static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1237e705c121SKalle Valo 				      const struct iwl_host_cmd *cmd)
1238e705c121SKalle Valo {
1239e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1240e705c121SKalle Valo 	int ret;
1241e705c121SKalle Valo 
1242e705c121SKalle Valo 	lockdep_assert_held(&trans_pcie->reg_lock);
1243e705c121SKalle Valo 
1244e705c121SKalle Valo 	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
1245e705c121SKalle Valo 	    !trans_pcie->ref_cmd_in_flight) {
1246e705c121SKalle Valo 		trans_pcie->ref_cmd_in_flight = true;
1247e705c121SKalle Valo 		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
1248c24c7f58SLuca Coelho 		iwl_trans_ref(trans);
1249e705c121SKalle Valo 	}
1250e705c121SKalle Valo 
1251e705c121SKalle Valo 	/*
1252e705c121SKalle Valo 	 * wake up the NIC to make sure that the firmware will see the host
1253e705c121SKalle Valo 	 * command - we will let the NIC sleep once all the host commands
1254e705c121SKalle Valo 	 * returned. This needs to be done only on NICs that have
1255e705c121SKalle Valo 	 * apmg_wake_up_wa set.
1256e705c121SKalle Valo 	 */
1257e705c121SKalle Valo 	if (trans->cfg->base_params->apmg_wake_up_wa &&
1258e705c121SKalle Valo 	    !trans_pcie->cmd_hold_nic_awake) {
1259e705c121SKalle Valo 		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1260e705c121SKalle Valo 					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1261e705c121SKalle Valo 
1262e705c121SKalle Valo 		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1263e705c121SKalle Valo 				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1264e705c121SKalle Valo 				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1265e705c121SKalle Valo 				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
1266e705c121SKalle Valo 				   15000);
1267e705c121SKalle Valo 		if (ret < 0) {
1268e705c121SKalle Valo 			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1269e705c121SKalle Valo 						   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1270e705c121SKalle Valo 			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
1271e705c121SKalle Valo 			return -EIO;
1272e705c121SKalle Valo 		}
1273e705c121SKalle Valo 		trans_pcie->cmd_hold_nic_awake = true;
1274e705c121SKalle Valo 	}
1275e705c121SKalle Valo 
1276e705c121SKalle Valo 	return 0;
1277e705c121SKalle Valo }
1278e705c121SKalle Valo 
1279e705c121SKalle Valo /*
1280e705c121SKalle Valo  * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1281e705c121SKalle Valo  *
1282e705c121SKalle Valo  * When FW advances 'R' index, all entries between old and new 'R' index
1283e705c121SKalle Valo  * need to be reclaimed. As a result, some free space forms. If there is
1284e705c121SKalle Valo  * enough free space (> low mark), wake the stack that feeds us.
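 *
 * A minimal stand-alone sketch of that walk (illustrative only; the
 * queue size and helper below are assumptions, not this driver's
 * definitions):
 *
 *	#define MODEL_QUEUE_SIZE 256	// power of two
 *
 *	static inline u32 model_inc_wrap(u32 idx)
 *	{
 *		return (idx + 1) & (MODEL_QUEUE_SIZE - 1);
 *	}
 *
 *	// walk the read index up to, and including, the entry the
 *	// firmware just completed; 'fw_r' is the reported 'R' index
 *	for (idx = model_inc_wrap(fw_r); read != idx;
 *	     read = model_inc_wrap(read))
 *		reclaim_entry(read);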
1285e705c121SKalle Valo */ 1286e705c121SKalle Valo static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1287e705c121SKalle Valo { 1288e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1289e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1290e705c121SKalle Valo unsigned long flags; 1291e705c121SKalle Valo int nfreed = 0; 1292e705c121SKalle Valo 1293e705c121SKalle Valo lockdep_assert_held(&txq->lock); 1294e705c121SKalle Valo 1295bb98ecd4SSara Sharon if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { 1296e705c121SKalle Valo IWL_ERR(trans, 1297e705c121SKalle Valo "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1298e705c121SKalle Valo __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 1299bb98ecd4SSara Sharon txq->write_ptr, txq->read_ptr); 1300e705c121SKalle Valo return; 1301e705c121SKalle Valo } 1302e705c121SKalle Valo 1303bb98ecd4SSara Sharon for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx; 1304bb98ecd4SSara Sharon txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1305e705c121SKalle Valo 1306e705c121SKalle Valo if (nfreed++ > 0) { 1307e705c121SKalle Valo IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1308bb98ecd4SSara Sharon idx, txq->write_ptr, txq->read_ptr); 1309e705c121SKalle Valo iwl_force_nmi(trans); 1310e705c121SKalle Valo } 1311e705c121SKalle Valo } 1312e705c121SKalle Valo 1313bb98ecd4SSara Sharon if (txq->read_ptr == txq->write_ptr) { 1314e705c121SKalle Valo spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1315e705c121SKalle Valo iwl_pcie_clear_cmd_in_flight(trans); 1316e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1317e705c121SKalle Valo } 1318e705c121SKalle Valo 1319e705c121SKalle Valo iwl_pcie_txq_progress(txq); 1320e705c121SKalle Valo } 1321e705c121SKalle Valo 1322e705c121SKalle Valo static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1323e705c121SKalle Valo u16 txq_id) 1324e705c121SKalle Valo { 1325e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1326e705c121SKalle Valo u32 tbl_dw_addr; 1327e705c121SKalle Valo u32 tbl_dw; 1328e705c121SKalle Valo u16 scd_q2ratid; 1329e705c121SKalle Valo 1330e705c121SKalle Valo scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1331e705c121SKalle Valo 1332e705c121SKalle Valo tbl_dw_addr = trans_pcie->scd_base_addr + 1333e705c121SKalle Valo SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1334e705c121SKalle Valo 1335e705c121SKalle Valo tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1336e705c121SKalle Valo 1337e705c121SKalle Valo if (txq_id & 0x1) 1338e705c121SKalle Valo tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1339e705c121SKalle Valo else 1340e705c121SKalle Valo tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1341e705c121SKalle Valo 1342e705c121SKalle Valo iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1343e705c121SKalle Valo 1344e705c121SKalle Valo return 0; 1345e705c121SKalle Valo } 1346e705c121SKalle Valo 1347e705c121SKalle Valo /* Receiver address (actually, Rx station's index into station table), 1348e705c121SKalle Valo * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1349e705c121SKalle Valo #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1350e705c121SKalle Valo 1351e705c121SKalle Valo void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1352e705c121SKalle Valo const struct iwl_trans_txq_scd_cfg *cfg, 1353e705c121SKalle Valo unsigned int 
wdg_timeout)
1354e705c121SKalle Valo {
1355e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1356e705c121SKalle Valo 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1357e705c121SKalle Valo 	int fifo = -1;
1358e705c121SKalle Valo 
1359e705c121SKalle Valo 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
1360e705c121SKalle Valo 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1361e705c121SKalle Valo 
1362ae79785fSSara Sharon 	if (cfg && trans->cfg->use_tfh)
1363ae79785fSSara Sharon 		WARN_ONCE(1, "Expected no calls to SCD configuration");
1364ae79785fSSara Sharon 
1365e705c121SKalle Valo 	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1366e705c121SKalle Valo 
1367e705c121SKalle Valo 	if (cfg) {
1368e705c121SKalle Valo 		fifo = cfg->fifo;
1369e705c121SKalle Valo 
1370e705c121SKalle Valo 		/* Disable the scheduler prior to configuring the cmd queue */
1371e705c121SKalle Valo 		if (txq_id == trans_pcie->cmd_queue &&
1372e705c121SKalle Valo 		    trans_pcie->scd_set_active)
1373e705c121SKalle Valo 			iwl_scd_enable_set_active(trans, 0);
1374e705c121SKalle Valo 
1375e705c121SKalle Valo 		/* Stop this Tx queue before configuring it */
1376e705c121SKalle Valo 		iwl_scd_txq_set_inactive(trans, txq_id);
1377e705c121SKalle Valo 
1378e705c121SKalle Valo 		/* Set this queue as a chain-building queue unless it is CMD */
1379e705c121SKalle Valo 		if (txq_id != trans_pcie->cmd_queue)
1380e705c121SKalle Valo 			iwl_scd_txq_set_chain(trans, txq_id);
1381e705c121SKalle Valo 
1382e705c121SKalle Valo 		if (cfg->aggregate) {
1383e705c121SKalle Valo 			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
1384e705c121SKalle Valo 
1385e705c121SKalle Valo 			/* Map receiver-address / traffic-ID to this queue */
1386e705c121SKalle Valo 			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1387e705c121SKalle Valo 
1388e705c121SKalle Valo 			/* enable aggregations for the queue */
1389e705c121SKalle Valo 			iwl_scd_txq_enable_agg(trans, txq_id);
1390e705c121SKalle Valo 			txq->ampdu = true;
1391e705c121SKalle Valo 		} else {
1392e705c121SKalle Valo 			/*
1393e705c121SKalle Valo 			 * disable aggregations for the queue, this will also
1394e705c121SKalle Valo 			 * make the ra_tid mapping configuration irrelevant
1395e705c121SKalle Valo 			 * since it is now a non-AGG queue.
1396e705c121SKalle Valo 			 */
1397e705c121SKalle Valo 			iwl_scd_txq_disable_agg(trans, txq_id);
1398e705c121SKalle Valo 
1399bb98ecd4SSara Sharon 			ssn = txq->read_ptr;
1400e705c121SKalle Valo 		}
1401e705c121SKalle Valo 	}
1402e705c121SKalle Valo 
1403e705c121SKalle Valo 	/* Place first TFD at index corresponding to start sequence number.
1404e705c121SKalle Valo * Assumes that ssn_idx is valid (!= 0xFFF) */ 1405bb98ecd4SSara Sharon txq->read_ptr = (ssn & 0xff); 1406bb98ecd4SSara Sharon txq->write_ptr = (ssn & 0xff); 1407e705c121SKalle Valo iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1408e705c121SKalle Valo (ssn & 0xff) | (txq_id << 8)); 1409e705c121SKalle Valo 1410e705c121SKalle Valo if (cfg) { 1411e705c121SKalle Valo u8 frame_limit = cfg->frame_limit; 1412e705c121SKalle Valo 1413e705c121SKalle Valo iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1414e705c121SKalle Valo 1415e705c121SKalle Valo /* Set up Tx window size and frame limit for this queue */ 1416e705c121SKalle Valo iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1417e705c121SKalle Valo SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1418e705c121SKalle Valo iwl_trans_write_mem32(trans, 1419e705c121SKalle Valo trans_pcie->scd_base_addr + 1420e705c121SKalle Valo SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1421e705c121SKalle Valo ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 1422e705c121SKalle Valo SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 1423e705c121SKalle Valo ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 1424e705c121SKalle Valo SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 1425e705c121SKalle Valo 1426e705c121SKalle Valo /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1427e705c121SKalle Valo iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1428e705c121SKalle Valo (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1429e705c121SKalle Valo (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1430e705c121SKalle Valo (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1431e705c121SKalle Valo SCD_QUEUE_STTS_REG_MSK); 1432e705c121SKalle Valo 1433e705c121SKalle Valo /* enable the scheduler for this queue (only) */ 1434e705c121SKalle Valo if (txq_id == trans_pcie->cmd_queue && 1435e705c121SKalle Valo trans_pcie->scd_set_active) 1436e705c121SKalle Valo iwl_scd_enable_set_active(trans, BIT(txq_id)); 1437e705c121SKalle Valo 1438e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, 1439e705c121SKalle Valo "Activate queue %d on FIFO %d WrPtr: %d\n", 1440e705c121SKalle Valo txq_id, fifo, ssn & 0xff); 1441e705c121SKalle Valo } else { 1442e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, 1443e705c121SKalle Valo "Activate queue %d WrPtr: %d\n", 1444e705c121SKalle Valo txq_id, ssn & 0xff); 1445e705c121SKalle Valo } 1446e705c121SKalle Valo } 1447e705c121SKalle Valo 144842db09c1SLiad Kaufman void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, 144942db09c1SLiad Kaufman bool shared_mode) 145042db09c1SLiad Kaufman { 145142db09c1SLiad Kaufman struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 145242db09c1SLiad Kaufman struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 145342db09c1SLiad Kaufman 145442db09c1SLiad Kaufman txq->ampdu = !shared_mode; 145542db09c1SLiad Kaufman } 145642db09c1SLiad Kaufman 1457e705c121SKalle Valo void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1458e705c121SKalle Valo bool configure_scd) 1459e705c121SKalle Valo { 1460e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1461e705c121SKalle Valo u32 stts_addr = trans_pcie->scd_base_addr + 1462e705c121SKalle Valo SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1463e705c121SKalle Valo static const u32 zero_val[4] = {}; 1464e705c121SKalle Valo 1465e705c121SKalle Valo trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; 1466e705c121SKalle Valo trans_pcie->txq[txq_id].frozen = false; 1467e705c121SKalle Valo 1468e705c121SKalle Valo /* 1469e705c121SKalle Valo * 
Upon HW Rfkill - we stop the device, and then stop the queues
1470e705c121SKalle Valo 	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1471e705c121SKalle Valo 	 * allow the op_mode to call txq_disable after it already called
1472e705c121SKalle Valo 	 * stop_device.
1473e705c121SKalle Valo 	 */
1474e705c121SKalle Valo 	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
1475e705c121SKalle Valo 		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1476e705c121SKalle Valo 			  "queue %d not used", txq_id);
1477e705c121SKalle Valo 		return;
1478e705c121SKalle Valo 	}
1479e705c121SKalle Valo 
1480ae79785fSSara Sharon 	if (configure_scd && trans->cfg->use_tfh)
1481ae79785fSSara Sharon 		WARN_ONCE(1, "Expected no calls to SCD configuration");
1482ae79785fSSara Sharon 
1483e705c121SKalle Valo 	if (configure_scd) {
1484e705c121SKalle Valo 		iwl_scd_txq_set_inactive(trans, txq_id);
1485e705c121SKalle Valo 
1486e705c121SKalle Valo 		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
1487e705c121SKalle Valo 				    ARRAY_SIZE(zero_val));
1488e705c121SKalle Valo 	}
1489e705c121SKalle Valo 
1490e705c121SKalle Valo 	iwl_pcie_txq_unmap(trans, txq_id);
1491e705c121SKalle Valo 	trans_pcie->txq[txq_id].ampdu = false;
1492e705c121SKalle Valo 
1493e705c121SKalle Valo 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1494e705c121SKalle Valo }
1495e705c121SKalle Valo 
1496e705c121SKalle Valo /*************** HOST COMMAND QUEUE FUNCTIONS *****/
1497e705c121SKalle Valo 
1498e705c121SKalle Valo /*
1499e705c121SKalle Valo  * iwl_pcie_enqueue_hcmd - enqueue a uCode command
1500e705c121SKalle Valo  * @priv: device private data pointer
1501e705c121SKalle Valo  * @cmd: a pointer to the ucode command structure
1502e705c121SKalle Valo  *
1503e705c121SKalle Valo  * The function returns a value < 0 to indicate that the operation
1504e705c121SKalle Valo  * failed. On success, it returns the index (>= 0) of the command in the
1505e705c121SKalle Valo  * command queue.
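 *
 * Caller-side sketch (illustrative; the command ID and payload are
 * made up for the example, and the real callers in this file wrap
 * this in iwl_pcie_send_hcmd_sync/_async):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = EXAMPLE_CMD_ID,		// assumed opcode
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *	int idx = iwl_pcie_enqueue_hcmd(trans, &hcmd);
 *
 *	if (idx < 0)
 *		return idx;	// -EINVAL, -ENOMEM, -ENOSPC, ...
 *	// on success, idx is the command's slot in the queue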
1506e705c121SKalle Valo */ 1507e705c121SKalle Valo static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1508e705c121SKalle Valo struct iwl_host_cmd *cmd) 1509e705c121SKalle Valo { 1510e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1511e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1512e705c121SKalle Valo struct iwl_device_cmd *out_cmd; 1513e705c121SKalle Valo struct iwl_cmd_meta *out_meta; 1514e705c121SKalle Valo unsigned long flags; 1515e705c121SKalle Valo void *dup_buf = NULL; 1516e705c121SKalle Valo dma_addr_t phys_addr; 1517e705c121SKalle Valo int idx; 15188de437c7SSara Sharon u16 copy_size, cmd_size, tb0_size; 1519e705c121SKalle Valo bool had_nocopy = false; 1520e705c121SKalle Valo u8 group_id = iwl_cmd_groupid(cmd->id); 1521e705c121SKalle Valo int i, ret; 1522e705c121SKalle Valo u32 cmd_pos; 1523e705c121SKalle Valo const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1524e705c121SKalle Valo u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1525e705c121SKalle Valo 15265b88792cSSara Sharon if (WARN(!trans->wide_cmd_header && 1527e705c121SKalle Valo group_id > IWL_ALWAYS_LONG_GROUP, 1528e705c121SKalle Valo "unsupported wide command %#x\n", cmd->id)) 1529e705c121SKalle Valo return -EINVAL; 1530e705c121SKalle Valo 1531e705c121SKalle Valo if (group_id != 0) { 1532e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header_wide); 1533e705c121SKalle Valo cmd_size = sizeof(struct iwl_cmd_header_wide); 1534e705c121SKalle Valo } else { 1535e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header); 1536e705c121SKalle Valo cmd_size = sizeof(struct iwl_cmd_header); 1537e705c121SKalle Valo } 1538e705c121SKalle Valo 1539e705c121SKalle Valo /* need one for the header if the first is NOCOPY */ 1540e705c121SKalle Valo BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1541e705c121SKalle Valo 1542e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1543e705c121SKalle Valo cmddata[i] = cmd->data[i]; 1544e705c121SKalle Valo cmdlen[i] = cmd->len[i]; 1545e705c121SKalle Valo 1546e705c121SKalle Valo if (!cmd->len[i]) 1547e705c121SKalle Valo continue; 1548e705c121SKalle Valo 15498de437c7SSara Sharon /* need at least IWL_FIRST_TB_SIZE copied */ 15508de437c7SSara Sharon if (copy_size < IWL_FIRST_TB_SIZE) { 15518de437c7SSara Sharon int copy = IWL_FIRST_TB_SIZE - copy_size; 1552e705c121SKalle Valo 1553e705c121SKalle Valo if (copy > cmdlen[i]) 1554e705c121SKalle Valo copy = cmdlen[i]; 1555e705c121SKalle Valo cmdlen[i] -= copy; 1556e705c121SKalle Valo cmddata[i] += copy; 1557e705c121SKalle Valo copy_size += copy; 1558e705c121SKalle Valo } 1559e705c121SKalle Valo 1560e705c121SKalle Valo if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1561e705c121SKalle Valo had_nocopy = true; 1562e705c121SKalle Valo if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1563e705c121SKalle Valo idx = -EINVAL; 1564e705c121SKalle Valo goto free_dup_buf; 1565e705c121SKalle Valo } 1566e705c121SKalle Valo } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1567e705c121SKalle Valo /* 1568e705c121SKalle Valo * This is also a chunk that isn't copied 1569e705c121SKalle Valo * to the static buffer so set had_nocopy. 
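			 *
			 * Caller-side sketch of the two flags (the chunk
			 * names are made up; only the flags themselves
			 * are real):
			 *
			 *	// chunk 0 is memcpy'd into the command
			 *	cmd.data[0] = &fixed_hdr;
			 *	cmd.len[0] = sizeof(fixed_hdr);
			 *	// chunk 1 stays in place, DMA-mapped as is
			 *	cmd.data[1] = big_buf;
			 *	cmd.len[1] = big_len;
			 *	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
			 *	// IWL_HCMD_DFL_DUP would kmemdup() chunk 1
			 *	// instead, as handled just below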
1570e705c121SKalle Valo */ 1571e705c121SKalle Valo had_nocopy = true; 1572e705c121SKalle Valo 1573e705c121SKalle Valo /* only allowed once */ 1574e705c121SKalle Valo if (WARN_ON(dup_buf)) { 1575e705c121SKalle Valo idx = -EINVAL; 1576e705c121SKalle Valo goto free_dup_buf; 1577e705c121SKalle Valo } 1578e705c121SKalle Valo 1579e705c121SKalle Valo dup_buf = kmemdup(cmddata[i], cmdlen[i], 1580e705c121SKalle Valo GFP_ATOMIC); 1581e705c121SKalle Valo if (!dup_buf) 1582e705c121SKalle Valo return -ENOMEM; 1583e705c121SKalle Valo } else { 1584e705c121SKalle Valo /* NOCOPY must not be followed by normal! */ 1585e705c121SKalle Valo if (WARN_ON(had_nocopy)) { 1586e705c121SKalle Valo idx = -EINVAL; 1587e705c121SKalle Valo goto free_dup_buf; 1588e705c121SKalle Valo } 1589e705c121SKalle Valo copy_size += cmdlen[i]; 1590e705c121SKalle Valo } 1591e705c121SKalle Valo cmd_size += cmd->len[i]; 1592e705c121SKalle Valo } 1593e705c121SKalle Valo 1594e705c121SKalle Valo /* 1595e705c121SKalle Valo * If any of the command structures end up being larger than 1596e705c121SKalle Valo * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1597e705c121SKalle Valo * allocated into separate TFDs, then we will need to 1598e705c121SKalle Valo * increase the size of the buffers. 1599e705c121SKalle Valo */ 1600e705c121SKalle Valo if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1601e705c121SKalle Valo "Command %s (%#x) is too large (%d bytes)\n", 160239bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 160339bdb17eSSharon Dvir cmd->id, copy_size)) { 1604e705c121SKalle Valo idx = -EINVAL; 1605e705c121SKalle Valo goto free_dup_buf; 1606e705c121SKalle Valo } 1607e705c121SKalle Valo 1608e705c121SKalle Valo spin_lock_bh(&txq->lock); 1609e705c121SKalle Valo 1610bb98ecd4SSara Sharon if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { 1611e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1612e705c121SKalle Valo 1613e705c121SKalle Valo IWL_ERR(trans, "No space in command queue\n"); 1614e705c121SKalle Valo iwl_op_mode_cmd_queue_full(trans->op_mode); 1615e705c121SKalle Valo idx = -ENOSPC; 1616e705c121SKalle Valo goto free_dup_buf; 1617e705c121SKalle Valo } 1618e705c121SKalle Valo 1619bb98ecd4SSara Sharon idx = get_cmd_index(txq, txq->write_ptr); 1620e705c121SKalle Valo out_cmd = txq->entries[idx].cmd; 1621e705c121SKalle Valo out_meta = &txq->entries[idx].meta; 1622e705c121SKalle Valo 1623e705c121SKalle Valo memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1624e705c121SKalle Valo if (cmd->flags & CMD_WANT_SKB) 1625e705c121SKalle Valo out_meta->source = cmd; 1626e705c121SKalle Valo 1627e705c121SKalle Valo /* set up the header */ 1628e705c121SKalle Valo if (group_id != 0) { 1629e705c121SKalle Valo out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1630e705c121SKalle Valo out_cmd->hdr_wide.group_id = group_id; 1631e705c121SKalle Valo out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1632e705c121SKalle Valo out_cmd->hdr_wide.length = 1633e705c121SKalle Valo cpu_to_le16(cmd_size - 1634e705c121SKalle Valo sizeof(struct iwl_cmd_header_wide)); 1635e705c121SKalle Valo out_cmd->hdr_wide.reserved = 0; 1636e705c121SKalle Valo out_cmd->hdr_wide.sequence = 1637e705c121SKalle Valo cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1638bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr)); 1639e705c121SKalle Valo 1640e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header_wide); 1641e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header_wide); 1642e705c121SKalle Valo } else { 1643e705c121SKalle Valo out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1644e705c121SKalle Valo out_cmd->hdr.sequence = 1645e705c121SKalle Valo cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1646bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr)); 1647e705c121SKalle Valo out_cmd->hdr.group_id = 0; 1648e705c121SKalle Valo 1649e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header); 1650e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header); 1651e705c121SKalle Valo } 1652e705c121SKalle Valo 1653e705c121SKalle Valo /* and copy the data that needs to be copied */ 1654e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1655e705c121SKalle Valo int copy; 1656e705c121SKalle Valo 1657e705c121SKalle Valo if (!cmd->len[i]) 1658e705c121SKalle Valo continue; 1659e705c121SKalle Valo 1660e705c121SKalle Valo /* copy everything if not nocopy/dup */ 1661e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1662e705c121SKalle Valo IWL_HCMD_DFL_DUP))) { 1663e705c121SKalle Valo copy = cmd->len[i]; 1664e705c121SKalle Valo 1665e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1666e705c121SKalle Valo cmd_pos += copy; 1667e705c121SKalle Valo copy_size += copy; 1668e705c121SKalle Valo continue; 1669e705c121SKalle Valo } 1670e705c121SKalle Valo 1671e705c121SKalle Valo /* 16728de437c7SSara Sharon * Otherwise we need at least IWL_FIRST_TB_SIZE copied 16738de437c7SSara Sharon * in total (for bi-directional DMA), but copy up to what 1674e705c121SKalle Valo * we can fit into the payload for debug dump purposes. 
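		 *
		 * Worked example (assuming IWL_FIRST_TB_SIZE is 20 bytes;
		 * see internal.h for the real value): with an 8-byte wide
		 * header already copied (cmd_pos = copy_size = 8) and a
		 * 100-byte NOCOPY chunk,
		 *
		 *	copy      = min(TFD_MAX_PAYLOAD_SIZE - 8, 100) = 100
		 *	copy_size = 8 + min(20 - 8, 100) = 20
		 *
		 * so TB0 still covers exactly the first bytes of the
		 * command, while the chunk itself is mapped as its own TB.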
1675e705c121SKalle Valo */ 1676e705c121SKalle Valo copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1677e705c121SKalle Valo 1678e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1679e705c121SKalle Valo cmd_pos += copy; 1680e705c121SKalle Valo 1681e705c121SKalle Valo /* However, treat copy_size the proper way, we need it below */ 16828de437c7SSara Sharon if (copy_size < IWL_FIRST_TB_SIZE) { 16838de437c7SSara Sharon copy = IWL_FIRST_TB_SIZE - copy_size; 1684e705c121SKalle Valo 1685e705c121SKalle Valo if (copy > cmd->len[i]) 1686e705c121SKalle Valo copy = cmd->len[i]; 1687e705c121SKalle Valo copy_size += copy; 1688e705c121SKalle Valo } 1689e705c121SKalle Valo } 1690e705c121SKalle Valo 1691e705c121SKalle Valo IWL_DEBUG_HC(trans, 1692e705c121SKalle Valo "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 169339bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 1694e705c121SKalle Valo group_id, out_cmd->hdr.cmd, 1695e705c121SKalle Valo le16_to_cpu(out_cmd->hdr.sequence), 1696bb98ecd4SSara Sharon cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); 1697e705c121SKalle Valo 16988de437c7SSara Sharon /* start the TFD with the minimum copy bytes */ 16998de437c7SSara Sharon tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 17008de437c7SSara Sharon memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1701e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, 17028de437c7SSara Sharon iwl_pcie_get_first_tb_dma(txq, idx), 17038de437c7SSara Sharon tb0_size, true); 1704e705c121SKalle Valo 1705e705c121SKalle Valo /* map first command fragment, if any remains */ 17068de437c7SSara Sharon if (copy_size > tb0_size) { 1707e705c121SKalle Valo phys_addr = dma_map_single(trans->dev, 17088de437c7SSara Sharon ((u8 *)&out_cmd->hdr) + tb0_size, 17098de437c7SSara Sharon copy_size - tb0_size, 1710e705c121SKalle Valo DMA_TO_DEVICE); 1711e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) { 1712bb98ecd4SSara Sharon iwl_pcie_tfd_unmap(trans, out_meta, txq, 1713bb98ecd4SSara Sharon txq->write_ptr); 1714e705c121SKalle Valo idx = -ENOMEM; 1715e705c121SKalle Valo goto out; 1716e705c121SKalle Valo } 1717e705c121SKalle Valo 1718e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 17198de437c7SSara Sharon copy_size - tb0_size, false); 1720e705c121SKalle Valo } 1721e705c121SKalle Valo 1722e705c121SKalle Valo /* map the remaining (adjusted) nocopy/dup fragments */ 1723e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1724e705c121SKalle Valo const void *data = cmddata[i]; 1725e705c121SKalle Valo 1726e705c121SKalle Valo if (!cmdlen[i]) 1727e705c121SKalle Valo continue; 1728e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1729e705c121SKalle Valo IWL_HCMD_DFL_DUP))) 1730e705c121SKalle Valo continue; 1731e705c121SKalle Valo if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1732e705c121SKalle Valo data = dup_buf; 1733e705c121SKalle Valo phys_addr = dma_map_single(trans->dev, (void *)data, 1734e705c121SKalle Valo cmdlen[i], DMA_TO_DEVICE); 1735e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) { 1736bb98ecd4SSara Sharon iwl_pcie_tfd_unmap(trans, out_meta, txq, 1737bb98ecd4SSara Sharon txq->write_ptr); 1738e705c121SKalle Valo idx = -ENOMEM; 1739e705c121SKalle Valo goto out; 1740e705c121SKalle Valo } 1741e705c121SKalle Valo 1742e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1743e705c121SKalle Valo } 1744e705c121SKalle Valo 17453cd1980bSSara Sharon 
BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1746e705c121SKalle Valo 	out_meta->flags = cmd->flags;
1747e705c121SKalle Valo 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1748e705c121SKalle Valo 		kzfree(txq->entries[idx].free_buf);
1749e705c121SKalle Valo 	txq->entries[idx].free_buf = dup_buf;
1750e705c121SKalle Valo 
1751e705c121SKalle Valo 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1752e705c121SKalle Valo 
1753e705c121SKalle Valo 	/* start timer if queue currently empty */
1754bb98ecd4SSara Sharon 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1755e705c121SKalle Valo 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1756e705c121SKalle Valo 
1757e705c121SKalle Valo 	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1758e705c121SKalle Valo 	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1759e705c121SKalle Valo 	if (ret < 0) {
1760e705c121SKalle Valo 		idx = ret;
1761e705c121SKalle Valo 		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1762e705c121SKalle Valo 		goto out;
1763e705c121SKalle Valo 	}
1764e705c121SKalle Valo 
1765e705c121SKalle Valo 	/* Increment and update queue's write index */
1766bb98ecd4SSara Sharon 	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
1767e705c121SKalle Valo 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
1768e705c121SKalle Valo 
1769e705c121SKalle Valo 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1770e705c121SKalle Valo 
1771e705c121SKalle Valo out:
1772e705c121SKalle Valo 	spin_unlock_bh(&txq->lock);
1773e705c121SKalle Valo free_dup_buf:
1774e705c121SKalle Valo 	if (idx < 0)
1775e705c121SKalle Valo 		kfree(dup_buf);
1776e705c121SKalle Valo 	return idx;
1777e705c121SKalle Valo }
1778e705c121SKalle Valo 
1779e705c121SKalle Valo /*
1780e705c121SKalle Valo  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1781e705c121SKalle Valo  * @rxb: Rx buffer to reclaim
1782e705c121SKalle Valo  */
1783e705c121SKalle Valo void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1784e705c121SKalle Valo 			    struct iwl_rx_cmd_buffer *rxb)
1785e705c121SKalle Valo {
1786e705c121SKalle Valo 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1787e705c121SKalle Valo 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
178839bdb17eSSharon Dvir 	u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
178939bdb17eSSharon Dvir 	u32 cmd_id;
1790e705c121SKalle Valo 	int txq_id = SEQ_TO_QUEUE(sequence);
1791e705c121SKalle Valo 	int index = SEQ_TO_INDEX(sequence);
1792e705c121SKalle Valo 	int cmd_index;
1793e705c121SKalle Valo 	struct iwl_device_cmd *cmd;
1794e705c121SKalle Valo 	struct iwl_cmd_meta *meta;
1795e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1796e705c121SKalle Valo 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1797e705c121SKalle Valo 
1798e705c121SKalle Valo 	/* If a Tx command is being handled and it isn't in the actual
1799e705c121SKalle Valo 	 * command queue, then a command routing bug has been introduced
1800e705c121SKalle Valo 	 * in the queue management code.
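	 *
	 * A stand-alone sketch of the sequence packing this check relies
	 * on (the bit widths are illustrative assumptions, not this
	 * driver's authoritative definitions):
	 *
	 *	// queue in bits 8..12, ring index in bits 0..7
	 *	#define MODEL_SEQ_TO_QUEUE(seq)	(((seq) >> 8) & 0x1f)
	 *	#define MODEL_SEQ_TO_INDEX(seq)	((seq) & 0xff)
	 *
	 *	u16 seq = le16_to_cpu(pkt_hdr_sequence);
	 *	if (MODEL_SEQ_TO_QUEUE(seq) != cmd_queue_id)
	 *		report_routing_bug();	// assumed handler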
*/ 1801e705c121SKalle Valo if (WARN(txq_id != trans_pcie->cmd_queue, 1802e705c121SKalle Valo "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 1803e705c121SKalle Valo txq_id, trans_pcie->cmd_queue, sequence, 1804bb98ecd4SSara Sharon trans_pcie->txq[trans_pcie->cmd_queue].read_ptr, 1805bb98ecd4SSara Sharon trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) { 1806e705c121SKalle Valo iwl_print_hex_error(trans, pkt, 32); 1807e705c121SKalle Valo return; 1808e705c121SKalle Valo } 1809e705c121SKalle Valo 1810e705c121SKalle Valo spin_lock_bh(&txq->lock); 1811e705c121SKalle Valo 1812bb98ecd4SSara Sharon cmd_index = get_cmd_index(txq, index); 1813e705c121SKalle Valo cmd = txq->entries[cmd_index].cmd; 1814e705c121SKalle Valo meta = &txq->entries[cmd_index].meta; 181539bdb17eSSharon Dvir cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); 1816e705c121SKalle Valo 18176983ba69SSara Sharon iwl_pcie_tfd_unmap(trans, meta, txq, index); 1818e705c121SKalle Valo 1819e705c121SKalle Valo /* Input error checking is done when commands are added to queue. */ 1820e705c121SKalle Valo if (meta->flags & CMD_WANT_SKB) { 1821e705c121SKalle Valo struct page *p = rxb_steal_page(rxb); 1822e705c121SKalle Valo 1823e705c121SKalle Valo meta->source->resp_pkt = pkt; 1824e705c121SKalle Valo meta->source->_rx_page_addr = (unsigned long)page_address(p); 1825e705c121SKalle Valo meta->source->_rx_page_order = trans_pcie->rx_page_order; 1826e705c121SKalle Valo } 1827e705c121SKalle Valo 1828dcbb4746SEmmanuel Grumbach if (meta->flags & CMD_WANT_ASYNC_CALLBACK) 1829dcbb4746SEmmanuel Grumbach iwl_op_mode_async_cb(trans->op_mode, cmd); 1830dcbb4746SEmmanuel Grumbach 1831e705c121SKalle Valo iwl_pcie_cmdq_reclaim(trans, txq_id, index); 1832e705c121SKalle Valo 1833e705c121SKalle Valo if (!(meta->flags & CMD_ASYNC)) { 1834e705c121SKalle Valo if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { 1835e705c121SKalle Valo IWL_WARN(trans, 1836e705c121SKalle Valo "HCMD_ACTIVE already clear for command %s\n", 183739bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id)); 1838e705c121SKalle Valo } 1839e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1840e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 184139bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id)); 1842e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1843e705c121SKalle Valo } 1844e705c121SKalle Valo 18454cbb8e50SLuciano Coelho if (meta->flags & CMD_MAKE_TRANS_IDLE) { 18464cbb8e50SLuciano Coelho IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", 18474cbb8e50SLuciano Coelho iwl_get_cmd_string(trans, cmd->hdr.cmd)); 18484cbb8e50SLuciano Coelho set_bit(STATUS_TRANS_IDLE, &trans->status); 18494cbb8e50SLuciano Coelho wake_up(&trans_pcie->d0i3_waitq); 18504cbb8e50SLuciano Coelho } 18514cbb8e50SLuciano Coelho 18524cbb8e50SLuciano Coelho if (meta->flags & CMD_WAKE_UP_TRANS) { 18534cbb8e50SLuciano Coelho IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", 18544cbb8e50SLuciano Coelho iwl_get_cmd_string(trans, cmd->hdr.cmd)); 18554cbb8e50SLuciano Coelho clear_bit(STATUS_TRANS_IDLE, &trans->status); 18564cbb8e50SLuciano Coelho wake_up(&trans_pcie->d0i3_waitq); 18574cbb8e50SLuciano Coelho } 18584cbb8e50SLuciano Coelho 1859e705c121SKalle Valo meta->flags = 0; 1860e705c121SKalle Valo 1861e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1862e705c121SKalle Valo } 1863e705c121SKalle Valo 1864e705c121SKalle Valo #define HOST_COMPLETE_TIMEOUT (2 * HZ) 1865e705c121SKalle Valo 1866e705c121SKalle 
Valo static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, 1867e705c121SKalle Valo struct iwl_host_cmd *cmd) 1868e705c121SKalle Valo { 1869e705c121SKalle Valo int ret; 1870e705c121SKalle Valo 1871e705c121SKalle Valo /* An asynchronous command can not expect an SKB to be set. */ 1872e705c121SKalle Valo if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1873e705c121SKalle Valo return -EINVAL; 1874e705c121SKalle Valo 1875e705c121SKalle Valo ret = iwl_pcie_enqueue_hcmd(trans, cmd); 1876e705c121SKalle Valo if (ret < 0) { 1877e705c121SKalle Valo IWL_ERR(trans, 1878e705c121SKalle Valo "Error sending %s: enqueue_hcmd failed: %d\n", 187939bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), ret); 1880e705c121SKalle Valo return ret; 1881e705c121SKalle Valo } 1882e705c121SKalle Valo return 0; 1883e705c121SKalle Valo } 1884e705c121SKalle Valo 1885e705c121SKalle Valo static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, 1886e705c121SKalle Valo struct iwl_host_cmd *cmd) 1887e705c121SKalle Valo { 1888e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1889e705c121SKalle Valo int cmd_idx; 1890e705c121SKalle Valo int ret; 1891e705c121SKalle Valo 1892e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 189339bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1894e705c121SKalle Valo 1895e705c121SKalle Valo if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1896e705c121SKalle Valo &trans->status), 1897e705c121SKalle Valo "Command %s: a command is already active!\n", 189839bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id))) 1899e705c121SKalle Valo return -EIO; 1900e705c121SKalle Valo 1901e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 190239bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1903e705c121SKalle Valo 190471b1230cSLuca Coelho if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { 190571b1230cSLuca Coelho ret = wait_event_timeout(trans_pcie->d0i3_waitq, 190671b1230cSLuca Coelho pm_runtime_active(&trans_pcie->pci_dev->dev), 190771b1230cSLuca Coelho msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); 190871b1230cSLuca Coelho if (!ret) { 190971b1230cSLuca Coelho IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); 191071b1230cSLuca Coelho return -ETIMEDOUT; 191171b1230cSLuca Coelho } 191271b1230cSLuca Coelho } 191371b1230cSLuca Coelho 1914e705c121SKalle Valo cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 1915e705c121SKalle Valo if (cmd_idx < 0) { 1916e705c121SKalle Valo ret = cmd_idx; 1917e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1918e705c121SKalle Valo IWL_ERR(trans, 1919e705c121SKalle Valo "Error sending %s: enqueue_hcmd failed: %d\n", 192039bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), ret); 1921e705c121SKalle Valo return ret; 1922e705c121SKalle Valo } 1923e705c121SKalle Valo 1924e705c121SKalle Valo ret = wait_event_timeout(trans_pcie->wait_command_queue, 1925e705c121SKalle Valo !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1926e705c121SKalle Valo &trans->status), 1927e705c121SKalle Valo HOST_COMPLETE_TIMEOUT); 1928e705c121SKalle Valo if (!ret) { 1929e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1930e705c121SKalle Valo 1931e705c121SKalle Valo IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 193239bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 1933e705c121SKalle Valo jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1934e705c121SKalle Valo 1935e705c121SKalle Valo IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr 
%d\n", 1936bb98ecd4SSara Sharon txq->read_ptr, txq->write_ptr); 1937e705c121SKalle Valo 1938e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1939e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 194039bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1941e705c121SKalle Valo ret = -ETIMEDOUT; 1942e705c121SKalle Valo 1943e705c121SKalle Valo iwl_force_nmi(trans); 1944e705c121SKalle Valo iwl_trans_fw_error(trans); 1945e705c121SKalle Valo 1946e705c121SKalle Valo goto cancel; 1947e705c121SKalle Valo } 1948e705c121SKalle Valo 1949e705c121SKalle Valo if (test_bit(STATUS_FW_ERROR, &trans->status)) { 1950e705c121SKalle Valo IWL_ERR(trans, "FW error in SYNC CMD %s\n", 195139bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1952e705c121SKalle Valo dump_stack(); 1953e705c121SKalle Valo ret = -EIO; 1954e705c121SKalle Valo goto cancel; 1955e705c121SKalle Valo } 1956e705c121SKalle Valo 1957e705c121SKalle Valo if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1958e705c121SKalle Valo test_bit(STATUS_RFKILL, &trans->status)) { 1959e705c121SKalle Valo IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1960e705c121SKalle Valo ret = -ERFKILL; 1961e705c121SKalle Valo goto cancel; 1962e705c121SKalle Valo } 1963e705c121SKalle Valo 1964e705c121SKalle Valo if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1965e705c121SKalle Valo IWL_ERR(trans, "Error: Response NULL in '%s'\n", 196639bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1967e705c121SKalle Valo ret = -EIO; 1968e705c121SKalle Valo goto cancel; 1969e705c121SKalle Valo } 1970e705c121SKalle Valo 1971e705c121SKalle Valo return 0; 1972e705c121SKalle Valo 1973e705c121SKalle Valo cancel: 1974e705c121SKalle Valo if (cmd->flags & CMD_WANT_SKB) { 1975e705c121SKalle Valo /* 1976e705c121SKalle Valo * Cancel the CMD_WANT_SKB flag for the cmd in the 1977e705c121SKalle Valo * TX cmd queue. Otherwise in case the cmd comes 1978e705c121SKalle Valo * in later, it will possibly set an invalid 1979e705c121SKalle Valo * address (cmd->meta.source). 1980e705c121SKalle Valo */ 1981e705c121SKalle Valo trans_pcie->txq[trans_pcie->cmd_queue]. 
1982e705c121SKalle Valo entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 1983e705c121SKalle Valo } 1984e705c121SKalle Valo 1985e705c121SKalle Valo if (cmd->resp_pkt) { 1986e705c121SKalle Valo iwl_free_resp(cmd); 1987e705c121SKalle Valo cmd->resp_pkt = NULL; 1988e705c121SKalle Valo } 1989e705c121SKalle Valo 1990e705c121SKalle Valo return ret; 1991e705c121SKalle Valo } 1992e705c121SKalle Valo 1993e705c121SKalle Valo int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1994e705c121SKalle Valo { 1995e705c121SKalle Valo if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1996e705c121SKalle Valo test_bit(STATUS_RFKILL, &trans->status)) { 1997e705c121SKalle Valo IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 1998e705c121SKalle Valo cmd->id); 1999e705c121SKalle Valo return -ERFKILL; 2000e705c121SKalle Valo } 2001e705c121SKalle Valo 2002e705c121SKalle Valo if (cmd->flags & CMD_ASYNC) 2003e705c121SKalle Valo return iwl_pcie_send_hcmd_async(trans, cmd); 2004e705c121SKalle Valo 2005e705c121SKalle Valo /* We still can fail on RFKILL that can be asserted while we wait */ 2006e705c121SKalle Valo return iwl_pcie_send_hcmd_sync(trans, cmd); 2007e705c121SKalle Valo } 2008e705c121SKalle Valo 20093a0b2a42SEmmanuel Grumbach static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, 20103a0b2a42SEmmanuel Grumbach struct iwl_txq *txq, u8 hdr_len, 20113a0b2a42SEmmanuel Grumbach struct iwl_cmd_meta *out_meta, 20123a0b2a42SEmmanuel Grumbach struct iwl_device_cmd *dev_cmd, u16 tb1_len) 20133a0b2a42SEmmanuel Grumbach { 20146983ba69SSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 20153a0b2a42SEmmanuel Grumbach u16 tb2_len; 20163a0b2a42SEmmanuel Grumbach int i; 20173a0b2a42SEmmanuel Grumbach 20183a0b2a42SEmmanuel Grumbach /* 20193a0b2a42SEmmanuel Grumbach * Set up TFD's third entry to point directly to remainder 20203a0b2a42SEmmanuel Grumbach * of skb's head, if any 20213a0b2a42SEmmanuel Grumbach */ 20223a0b2a42SEmmanuel Grumbach tb2_len = skb_headlen(skb) - hdr_len; 20233a0b2a42SEmmanuel Grumbach 20243a0b2a42SEmmanuel Grumbach if (tb2_len > 0) { 20253a0b2a42SEmmanuel Grumbach dma_addr_t tb2_phys = dma_map_single(trans->dev, 20263a0b2a42SEmmanuel Grumbach skb->data + hdr_len, 20273a0b2a42SEmmanuel Grumbach tb2_len, DMA_TO_DEVICE); 20283a0b2a42SEmmanuel Grumbach if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { 2029bb98ecd4SSara Sharon iwl_pcie_tfd_unmap(trans, out_meta, txq, 2030bb98ecd4SSara Sharon txq->write_ptr); 20313a0b2a42SEmmanuel Grumbach return -EINVAL; 20323a0b2a42SEmmanuel Grumbach } 20333a0b2a42SEmmanuel Grumbach iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); 20343a0b2a42SEmmanuel Grumbach } 20353a0b2a42SEmmanuel Grumbach 20363a0b2a42SEmmanuel Grumbach /* set up the remaining entries to point to the data */ 20373a0b2a42SEmmanuel Grumbach for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 20383a0b2a42SEmmanuel Grumbach const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 20393a0b2a42SEmmanuel Grumbach dma_addr_t tb_phys; 20403a0b2a42SEmmanuel Grumbach int tb_idx; 20413a0b2a42SEmmanuel Grumbach 20423a0b2a42SEmmanuel Grumbach if (!skb_frag_size(frag)) 20433a0b2a42SEmmanuel Grumbach continue; 20443a0b2a42SEmmanuel Grumbach 20453a0b2a42SEmmanuel Grumbach tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 20463a0b2a42SEmmanuel Grumbach skb_frag_size(frag), DMA_TO_DEVICE); 20473a0b2a42SEmmanuel Grumbach 20483a0b2a42SEmmanuel Grumbach if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 2049bb98ecd4SSara Sharon 
iwl_pcie_tfd_unmap(trans, out_meta, txq, 2050bb98ecd4SSara Sharon txq->write_ptr); 20513a0b2a42SEmmanuel Grumbach return -EINVAL; 20523a0b2a42SEmmanuel Grumbach } 20533a0b2a42SEmmanuel Grumbach tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 20543a0b2a42SEmmanuel Grumbach skb_frag_size(frag), false); 20553a0b2a42SEmmanuel Grumbach 20563cd1980bSSara Sharon out_meta->tbs |= BIT(tb_idx); 20573a0b2a42SEmmanuel Grumbach } 20583a0b2a42SEmmanuel Grumbach 20593a0b2a42SEmmanuel Grumbach trace_iwlwifi_dev_tx(trans->dev, skb, 2060bb98ecd4SSara Sharon iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 20616983ba69SSara Sharon trans_pcie->tfd_size, 20628de437c7SSara Sharon &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 20633a0b2a42SEmmanuel Grumbach skb->data + hdr_len, tb2_len); 20643a0b2a42SEmmanuel Grumbach trace_iwlwifi_dev_tx_data(trans->dev, skb, 20653a0b2a42SEmmanuel Grumbach hdr_len, skb->len - hdr_len); 20663a0b2a42SEmmanuel Grumbach return 0; 20673a0b2a42SEmmanuel Grumbach } 20683a0b2a42SEmmanuel Grumbach 20696eb5e529SEmmanuel Grumbach #ifdef CONFIG_INET 20706eb5e529SEmmanuel Grumbach static struct iwl_tso_hdr_page * 20716eb5e529SEmmanuel Grumbach get_page_hdr(struct iwl_trans *trans, size_t len) 20726eb5e529SEmmanuel Grumbach { 20736eb5e529SEmmanuel Grumbach struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 20746eb5e529SEmmanuel Grumbach struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); 20756eb5e529SEmmanuel Grumbach 20766eb5e529SEmmanuel Grumbach if (!p->page) 20776eb5e529SEmmanuel Grumbach goto alloc; 20786eb5e529SEmmanuel Grumbach 20796eb5e529SEmmanuel Grumbach /* enough room on this page */ 20806eb5e529SEmmanuel Grumbach if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) 20816eb5e529SEmmanuel Grumbach return p; 20826eb5e529SEmmanuel Grumbach 20836eb5e529SEmmanuel Grumbach /* We don't have enough room on this page, get a new one. 
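	 *
	 * Stand-alone sketch of this bump allocator (the struct below
	 * mirrors iwl_tso_hdr_page but is a model, not the driver's):
	 *
	 *	struct model_hdr_page { struct page *page; u8 *pos; };
	 *
	 *	static bool model_fits(struct model_hdr_page *p, size_t len)
	 *	{
	 *		u8 *end = (u8 *)page_address(p->page) + PAGE_SIZE;
	 *		return p->pos + len < end;	// bump only if it fits
	 *	}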
*/ 20846eb5e529SEmmanuel Grumbach __free_page(p->page); 20856eb5e529SEmmanuel Grumbach 20866eb5e529SEmmanuel Grumbach alloc: 20876eb5e529SEmmanuel Grumbach p->page = alloc_page(GFP_ATOMIC); 20886eb5e529SEmmanuel Grumbach if (!p->page) 20896eb5e529SEmmanuel Grumbach return NULL; 20906eb5e529SEmmanuel Grumbach p->pos = page_address(p->page); 20916eb5e529SEmmanuel Grumbach return p; 20926eb5e529SEmmanuel Grumbach } 20936eb5e529SEmmanuel Grumbach 20946eb5e529SEmmanuel Grumbach static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, 20956eb5e529SEmmanuel Grumbach bool ipv6, unsigned int len) 20966eb5e529SEmmanuel Grumbach { 20976eb5e529SEmmanuel Grumbach if (ipv6) { 20986eb5e529SEmmanuel Grumbach struct ipv6hdr *iphv6 = iph; 20996eb5e529SEmmanuel Grumbach 21006eb5e529SEmmanuel Grumbach tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr, 21016eb5e529SEmmanuel Grumbach len + tcph->doff * 4, 21026eb5e529SEmmanuel Grumbach IPPROTO_TCP, 0); 21036eb5e529SEmmanuel Grumbach } else { 21046eb5e529SEmmanuel Grumbach struct iphdr *iphv4 = iph; 21056eb5e529SEmmanuel Grumbach 21066eb5e529SEmmanuel Grumbach ip_send_check(iphv4); 21076eb5e529SEmmanuel Grumbach tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr, 21086eb5e529SEmmanuel Grumbach len + tcph->doff * 4, 21096eb5e529SEmmanuel Grumbach IPPROTO_TCP, 0); 21106eb5e529SEmmanuel Grumbach } 21116eb5e529SEmmanuel Grumbach } 21126eb5e529SEmmanuel Grumbach 21136eb5e529SEmmanuel Grumbach static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 21146eb5e529SEmmanuel Grumbach struct iwl_txq *txq, u8 hdr_len, 21156eb5e529SEmmanuel Grumbach struct iwl_cmd_meta *out_meta, 21166eb5e529SEmmanuel Grumbach struct iwl_device_cmd *dev_cmd, u16 tb1_len) 21176eb5e529SEmmanuel Grumbach { 211805e5a7e5SJohannes Berg struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 21196eb5e529SEmmanuel Grumbach struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 21206eb5e529SEmmanuel Grumbach struct ieee80211_hdr *hdr = (void *)skb->data; 21216eb5e529SEmmanuel Grumbach unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 21226eb5e529SEmmanuel Grumbach unsigned int mss = skb_shinfo(skb)->gso_size; 21236eb5e529SEmmanuel Grumbach u16 length, iv_len, amsdu_pad; 21246eb5e529SEmmanuel Grumbach u8 *start_hdr; 21256eb5e529SEmmanuel Grumbach struct iwl_tso_hdr_page *hdr_page; 212621cb3222SJohannes Berg struct page **page_ptr; 21276eb5e529SEmmanuel Grumbach int ret; 21286eb5e529SEmmanuel Grumbach struct tso_t tso; 21296eb5e529SEmmanuel Grumbach 21306eb5e529SEmmanuel Grumbach /* if the packet is protected, then it must be CCMP or GCMP */ 21316eb5e529SEmmanuel Grumbach BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); 21326eb5e529SEmmanuel Grumbach iv_len = ieee80211_has_protected(hdr->frame_control) ? 
21336eb5e529SEmmanuel Grumbach IEEE80211_CCMP_HDR_LEN : 0; 21346eb5e529SEmmanuel Grumbach 21356eb5e529SEmmanuel Grumbach trace_iwlwifi_dev_tx(trans->dev, skb, 2136bb98ecd4SSara Sharon iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 21376983ba69SSara Sharon trans_pcie->tfd_size, 21388de437c7SSara Sharon &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 21396eb5e529SEmmanuel Grumbach NULL, 0); 21406eb5e529SEmmanuel Grumbach 21416eb5e529SEmmanuel Grumbach ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 21426eb5e529SEmmanuel Grumbach snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 21436eb5e529SEmmanuel Grumbach total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; 21446eb5e529SEmmanuel Grumbach amsdu_pad = 0; 21456eb5e529SEmmanuel Grumbach 21466eb5e529SEmmanuel Grumbach /* total amount of header we may need for this A-MSDU */ 21476eb5e529SEmmanuel Grumbach hdr_room = DIV_ROUND_UP(total_len, mss) * 21486eb5e529SEmmanuel Grumbach (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; 21496eb5e529SEmmanuel Grumbach 21506eb5e529SEmmanuel Grumbach /* Our device supports 9 segments at most, it will fit in 1 page */ 21516eb5e529SEmmanuel Grumbach hdr_page = get_page_hdr(trans, hdr_room); 21526eb5e529SEmmanuel Grumbach if (!hdr_page) 21536eb5e529SEmmanuel Grumbach return -ENOMEM; 21546eb5e529SEmmanuel Grumbach 21556eb5e529SEmmanuel Grumbach get_page(hdr_page->page); 21566eb5e529SEmmanuel Grumbach start_hdr = hdr_page->pos; 215721cb3222SJohannes Berg page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); 215821cb3222SJohannes Berg *page_ptr = hdr_page->page; 21596eb5e529SEmmanuel Grumbach memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); 21606eb5e529SEmmanuel Grumbach hdr_page->pos += iv_len; 21616eb5e529SEmmanuel Grumbach 21626eb5e529SEmmanuel Grumbach /* 21636eb5e529SEmmanuel Grumbach * Pull the ieee80211 header + IV to be able to use TSO core, 21646eb5e529SEmmanuel Grumbach * we will restore it for the tx_status flow. 21656eb5e529SEmmanuel Grumbach */ 21666eb5e529SEmmanuel Grumbach skb_pull(skb, hdr_len + iv_len); 21676eb5e529SEmmanuel Grumbach 216805e5a7e5SJohannes Berg /* 216905e5a7e5SJohannes Berg * Remove the length of all the headers that we don't actually 217005e5a7e5SJohannes Berg * have in the MPDU by themselves, but that we duplicate into 217105e5a7e5SJohannes Berg * all the different MSDUs inside the A-MSDU. 
217205e5a7e5SJohannes Berg */ 217305e5a7e5SJohannes Berg le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 217405e5a7e5SJohannes Berg 21756eb5e529SEmmanuel Grumbach tso_start(skb, &tso); 21766eb5e529SEmmanuel Grumbach 21776eb5e529SEmmanuel Grumbach while (total_len) { 21786eb5e529SEmmanuel Grumbach /* this is the data left for this subframe */ 21796eb5e529SEmmanuel Grumbach unsigned int data_left = 21806eb5e529SEmmanuel Grumbach min_t(unsigned int, mss, total_len); 21816eb5e529SEmmanuel Grumbach struct sk_buff *csum_skb = NULL; 21826eb5e529SEmmanuel Grumbach unsigned int hdr_tb_len; 21836eb5e529SEmmanuel Grumbach dma_addr_t hdr_tb_phys; 21846eb5e529SEmmanuel Grumbach struct tcphdr *tcph; 218505e5a7e5SJohannes Berg u8 *iph, *subf_hdrs_start = hdr_page->pos; 21866eb5e529SEmmanuel Grumbach 21876eb5e529SEmmanuel Grumbach total_len -= data_left; 21886eb5e529SEmmanuel Grumbach 21896eb5e529SEmmanuel Grumbach memset(hdr_page->pos, 0, amsdu_pad); 21906eb5e529SEmmanuel Grumbach hdr_page->pos += amsdu_pad; 21916eb5e529SEmmanuel Grumbach amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 21926eb5e529SEmmanuel Grumbach data_left)) & 0x3; 21936eb5e529SEmmanuel Grumbach ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 21946eb5e529SEmmanuel Grumbach hdr_page->pos += ETH_ALEN; 21956eb5e529SEmmanuel Grumbach ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 21966eb5e529SEmmanuel Grumbach hdr_page->pos += ETH_ALEN; 21976eb5e529SEmmanuel Grumbach 21986eb5e529SEmmanuel Grumbach length = snap_ip_tcp_hdrlen + data_left; 21996eb5e529SEmmanuel Grumbach *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 22006eb5e529SEmmanuel Grumbach hdr_page->pos += sizeof(length); 22016eb5e529SEmmanuel Grumbach 22026eb5e529SEmmanuel Grumbach /* 22036eb5e529SEmmanuel Grumbach * This will copy the SNAP as well which will be considered 22046eb5e529SEmmanuel Grumbach * as MAC header. 
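		 *
		 * The 4-byte subframe alignment used above can be modelled
		 * on its own (a sketch; 14 is sizeof(struct ethhdr)):
		 *
		 *	unsigned int sub_len = 14 + snap_ip_tcp_hdrlen +
		 *			       data_left;
		 *	// pad inserted before the NEXT subframe so each
		 *	// subframe starts on a 4-byte boundary
		 *	amsdu_pad = (4 - sub_len) & 0x3;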
22056eb5e529SEmmanuel Grumbach */ 22066eb5e529SEmmanuel Grumbach tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 22076eb5e529SEmmanuel Grumbach iph = hdr_page->pos + 8; 22086eb5e529SEmmanuel Grumbach tcph = (void *)(iph + ip_hdrlen); 22096eb5e529SEmmanuel Grumbach 22106eb5e529SEmmanuel Grumbach /* For testing on current hardware only */ 22116eb5e529SEmmanuel Grumbach if (trans_pcie->sw_csum_tx) { 22126eb5e529SEmmanuel Grumbach csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), 22136eb5e529SEmmanuel Grumbach GFP_ATOMIC); 22146eb5e529SEmmanuel Grumbach if (!csum_skb) { 22156eb5e529SEmmanuel Grumbach ret = -ENOMEM; 22166eb5e529SEmmanuel Grumbach goto out_unmap; 22176eb5e529SEmmanuel Grumbach } 22186eb5e529SEmmanuel Grumbach 22196eb5e529SEmmanuel Grumbach iwl_compute_pseudo_hdr_csum(iph, tcph, 22206eb5e529SEmmanuel Grumbach skb->protocol == 22216eb5e529SEmmanuel Grumbach htons(ETH_P_IPV6), 22226eb5e529SEmmanuel Grumbach data_left); 22236eb5e529SEmmanuel Grumbach 22246eb5e529SEmmanuel Grumbach memcpy(skb_put(csum_skb, tcp_hdrlen(skb)), 22256eb5e529SEmmanuel Grumbach tcph, tcp_hdrlen(skb)); 2226a52a8a4dSZhang Shengju skb_reset_transport_header(csum_skb); 22276eb5e529SEmmanuel Grumbach csum_skb->csum_start = 22286eb5e529SEmmanuel Grumbach (unsigned char *)tcp_hdr(csum_skb) - 22296eb5e529SEmmanuel Grumbach csum_skb->head; 22306eb5e529SEmmanuel Grumbach } 22316eb5e529SEmmanuel Grumbach 22326eb5e529SEmmanuel Grumbach hdr_page->pos += snap_ip_tcp_hdrlen; 22336eb5e529SEmmanuel Grumbach 22346eb5e529SEmmanuel Grumbach hdr_tb_len = hdr_page->pos - start_hdr; 22356eb5e529SEmmanuel Grumbach hdr_tb_phys = dma_map_single(trans->dev, start_hdr, 22366eb5e529SEmmanuel Grumbach hdr_tb_len, DMA_TO_DEVICE); 22376eb5e529SEmmanuel Grumbach if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { 22386eb5e529SEmmanuel Grumbach dev_kfree_skb(csum_skb); 22396eb5e529SEmmanuel Grumbach ret = -EINVAL; 22406eb5e529SEmmanuel Grumbach goto out_unmap; 22416eb5e529SEmmanuel Grumbach } 22426eb5e529SEmmanuel Grumbach iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 22436eb5e529SEmmanuel Grumbach hdr_tb_len, false); 22446eb5e529SEmmanuel Grumbach trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, 22456eb5e529SEmmanuel Grumbach hdr_tb_len); 224605e5a7e5SJohannes Berg /* add this subframe's headers' length to the tx_cmd */ 224705e5a7e5SJohannes Berg le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 22486eb5e529SEmmanuel Grumbach 22496eb5e529SEmmanuel Grumbach /* prepare the start_hdr for the next subframe */ 22506eb5e529SEmmanuel Grumbach start_hdr = hdr_page->pos; 22516eb5e529SEmmanuel Grumbach 22526eb5e529SEmmanuel Grumbach /* put the payload */ 22536eb5e529SEmmanuel Grumbach while (data_left) { 22546eb5e529SEmmanuel Grumbach unsigned int size = min_t(unsigned int, tso.size, 22556eb5e529SEmmanuel Grumbach data_left); 22566eb5e529SEmmanuel Grumbach dma_addr_t tb_phys; 22576eb5e529SEmmanuel Grumbach 22586eb5e529SEmmanuel Grumbach if (trans_pcie->sw_csum_tx) 22596eb5e529SEmmanuel Grumbach memcpy(skb_put(csum_skb, size), tso.data, size); 22606eb5e529SEmmanuel Grumbach 22616eb5e529SEmmanuel Grumbach tb_phys = dma_map_single(trans->dev, tso.data, 22626eb5e529SEmmanuel Grumbach size, DMA_TO_DEVICE); 22636eb5e529SEmmanuel Grumbach if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 22646eb5e529SEmmanuel Grumbach dev_kfree_skb(csum_skb); 22656eb5e529SEmmanuel Grumbach ret = -EINVAL; 22666eb5e529SEmmanuel Grumbach goto out_unmap; 22676eb5e529SEmmanuel Grumbach } 22686eb5e529SEmmanuel Grumbach 
22696eb5e529SEmmanuel Grumbach 			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
22706eb5e529SEmmanuel Grumbach 					       size, false);
22716eb5e529SEmmanuel Grumbach 			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
22726eb5e529SEmmanuel Grumbach 						       size);
22736eb5e529SEmmanuel Grumbach 
22746eb5e529SEmmanuel Grumbach 			data_left -= size;
22756eb5e529SEmmanuel Grumbach 			tso_build_data(skb, &tso, size);
22766eb5e529SEmmanuel Grumbach 		}
22776eb5e529SEmmanuel Grumbach 
22786eb5e529SEmmanuel Grumbach 		/* For testing on early hardware only */
22796eb5e529SEmmanuel Grumbach 		if (trans_pcie->sw_csum_tx) {
22806eb5e529SEmmanuel Grumbach 			__wsum csum;
22816eb5e529SEmmanuel Grumbach 
22826eb5e529SEmmanuel Grumbach 			csum = skb_checksum(csum_skb,
22836eb5e529SEmmanuel Grumbach 					    skb_checksum_start_offset(csum_skb),
22846eb5e529SEmmanuel Grumbach 					    csum_skb->len -
22856eb5e529SEmmanuel Grumbach 					    skb_checksum_start_offset(csum_skb),
22866eb5e529SEmmanuel Grumbach 					    0);
22876eb5e529SEmmanuel Grumbach 			dev_kfree_skb(csum_skb);
22886eb5e529SEmmanuel Grumbach 			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
22896eb5e529SEmmanuel Grumbach 						hdr_tb_len, DMA_TO_DEVICE);
22906eb5e529SEmmanuel Grumbach 			tcph->check = csum_fold(csum);
22916eb5e529SEmmanuel Grumbach 			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
22926eb5e529SEmmanuel Grumbach 						   hdr_tb_len, DMA_TO_DEVICE);
22936eb5e529SEmmanuel Grumbach 		}
22946eb5e529SEmmanuel Grumbach 	}
22956eb5e529SEmmanuel Grumbach 
22966eb5e529SEmmanuel Grumbach 	/* re-add the WiFi header and IV */
22976eb5e529SEmmanuel Grumbach 	skb_push(skb, hdr_len + iv_len);
22986eb5e529SEmmanuel Grumbach 
22996eb5e529SEmmanuel Grumbach 	return 0;
23006eb5e529SEmmanuel Grumbach 
23016eb5e529SEmmanuel Grumbach out_unmap:
2302bb98ecd4SSara Sharon 	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
23036eb5e529SEmmanuel Grumbach 	return ret;
23046eb5e529SEmmanuel Grumbach }
23056eb5e529SEmmanuel Grumbach #else /* CONFIG_INET */
23066eb5e529SEmmanuel Grumbach static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
23076eb5e529SEmmanuel Grumbach 				   struct iwl_txq *txq, u8 hdr_len,
23086eb5e529SEmmanuel Grumbach 				   struct iwl_cmd_meta *out_meta,
23096eb5e529SEmmanuel Grumbach 				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
23106eb5e529SEmmanuel Grumbach {
23116eb5e529SEmmanuel Grumbach 	/* No A-MSDU without CONFIG_INET */
23126eb5e529SEmmanuel Grumbach 	WARN_ON(1);
23136eb5e529SEmmanuel Grumbach 
23146eb5e529SEmmanuel Grumbach 	return -1;
23156eb5e529SEmmanuel Grumbach }
23166eb5e529SEmmanuel Grumbach #endif /* CONFIG_INET */
23176eb5e529SEmmanuel Grumbach 
2318e705c121SKalle Valo int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2319e705c121SKalle Valo 		      struct iwl_device_cmd *dev_cmd, int txq_id)
2320e705c121SKalle Valo {
2321e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2322e705c121SKalle Valo 	struct ieee80211_hdr *hdr;
2323e705c121SKalle Valo 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2324e705c121SKalle Valo 	struct iwl_cmd_meta *out_meta;
2325e705c121SKalle Valo 	struct iwl_txq *txq;
2326e705c121SKalle Valo 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2327e705c121SKalle Valo 	void *tb1_addr;
23284fe10bc6SSara Sharon 	void *tfd;
23293a0b2a42SEmmanuel Grumbach 	u16 len, tb1_len;
2330e705c121SKalle Valo 	bool wait_write_ptr;
2331e705c121SKalle Valo 	__le16 fc;
2332e705c121SKalle Valo 	u8 hdr_len;
2333e705c121SKalle Valo 	u16 wifi_seq;
2334c772a3d3SSara Sharon 	bool amsdu;
2335e705c121SKalle Valo 
2336e705c121SKalle Valo 	txq = &trans_pcie->txq[txq_id];
2336e705c121SKalle Valo	txq = &trans_pcie->txq[txq_id];
2337e705c121SKalle Valo
2338e705c121SKalle Valo	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
2339e705c121SKalle Valo		      "TX on unused queue %d\n", txq_id))
2340e705c121SKalle Valo		return -EINVAL;
2341e705c121SKalle Valo
234241837ca9SEmmanuel Grumbach	if (unlikely(trans_pcie->sw_csum_tx &&
234341837ca9SEmmanuel Grumbach		     skb->ip_summed == CHECKSUM_PARTIAL)) {
234441837ca9SEmmanuel Grumbach		int offs = skb_checksum_start_offset(skb);
234541837ca9SEmmanuel Grumbach		int csum_offs = offs + skb->csum_offset;
234641837ca9SEmmanuel Grumbach		__wsum csum;
234741837ca9SEmmanuel Grumbach
234841837ca9SEmmanuel Grumbach		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
234941837ca9SEmmanuel Grumbach			return -1;
235041837ca9SEmmanuel Grumbach
235141837ca9SEmmanuel Grumbach		csum = skb_checksum(skb, offs, skb->len - offs, 0);
235241837ca9SEmmanuel Grumbach		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
23533955525dSEmmanuel Grumbach
23543955525dSEmmanuel Grumbach		skb->ip_summed = CHECKSUM_UNNECESSARY;
235541837ca9SEmmanuel Grumbach	}
235641837ca9SEmmanuel Grumbach
2357e705c121SKalle Valo	if (skb_is_nonlinear(skb) &&
23583cd1980bSSara Sharon	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
2359e705c121SKalle Valo	    __skb_linearize(skb))
2360e705c121SKalle Valo		return -ENOMEM;
2361e705c121SKalle Valo
2362e705c121SKalle Valo	/* mac80211 always puts the full header into the SKB's head,
2363e705c121SKalle Valo	 * so there's no need to check if it's readable there
2364e705c121SKalle Valo	 */
2365e705c121SKalle Valo	hdr = (struct ieee80211_hdr *)skb->data;
2366e705c121SKalle Valo	fc = hdr->frame_control;
2367e705c121SKalle Valo	hdr_len = ieee80211_hdrlen(fc);
2368e705c121SKalle Valo
2369e705c121SKalle Valo	spin_lock(&txq->lock);
2370e705c121SKalle Valo
2371bb98ecd4SSara Sharon	if (iwl_queue_space(txq) < txq->high_mark) {
23723955525dSEmmanuel Grumbach		iwl_stop_queue(trans, txq);
23733955525dSEmmanuel Grumbach
23743955525dSEmmanuel Grumbach		/* don't put the packet on the ring if there is no room */
2375bb98ecd4SSara Sharon		if (unlikely(iwl_queue_space(txq) < 3)) {
237621cb3222SJohannes Berg			struct iwl_device_cmd **dev_cmd_ptr;
23773955525dSEmmanuel Grumbach
237821cb3222SJohannes Berg			dev_cmd_ptr = (void *)((u8 *)skb->cb +
237921cb3222SJohannes Berg					       trans_pcie->dev_cmd_offs);
238021cb3222SJohannes Berg
238121cb3222SJohannes Berg			*dev_cmd_ptr = dev_cmd;
23823955525dSEmmanuel Grumbach			__skb_queue_tail(&txq->overflow_q, skb);
23833955525dSEmmanuel Grumbach
23843955525dSEmmanuel Grumbach			spin_unlock(&txq->lock);
23853955525dSEmmanuel Grumbach			return 0;
23863955525dSEmmanuel Grumbach		}
23873955525dSEmmanuel Grumbach	}
23883955525dSEmmanuel Grumbach
2389e705c121SKalle Valo	/* In AGG mode, the index in the ring must correspond to the WiFi
2390e705c121SKalle Valo	 * sequence number. This is a HW requirement to help the SCD to parse
2391e705c121SKalle Valo	 * the BA.
2392e705c121SKalle Valo	 * Check here that the packets are in the right place on the ring.
2393e705c121SKalle Valo	 */
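	/* For example, on a 256-entry ring a QoS frame with 802.11 sequence
	 * number 0x123 may only be placed at TFD index 0x23 of an
	 * aggregation queue.
	 */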
2394e705c121SKalle Valo	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
2395e705c121SKalle Valo	WARN_ONCE(txq->ampdu &&
2396bb98ecd4SSara Sharon		  (wifi_seq & 0xff) != txq->write_ptr,
2397e705c121SKalle Valo		  "Q: %d WiFi Seq %d tfdNum %d",
2398bb98ecd4SSara Sharon		  txq_id, wifi_seq, txq->write_ptr);
2399e705c121SKalle Valo
2400e705c121SKalle Valo	/* Set up driver data for this TFD */
2401bb98ecd4SSara Sharon	txq->entries[txq->write_ptr].skb = skb;
2402bb98ecd4SSara Sharon	txq->entries[txq->write_ptr].cmd = dev_cmd;
2403e705c121SKalle Valo
2404e705c121SKalle Valo	dev_cmd->hdr.sequence =
2405e705c121SKalle Valo		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2406bb98ecd4SSara Sharon			    INDEX_TO_SEQ(txq->write_ptr)));
2407e705c121SKalle Valo
2408bb98ecd4SSara Sharon	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
2409e705c121SKalle Valo	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
2410e705c121SKalle Valo		       offsetof(struct iwl_tx_cmd, scratch);
2411e705c121SKalle Valo
2412e705c121SKalle Valo	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
2413e705c121SKalle Valo	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2414e705c121SKalle Valo
2415e705c121SKalle Valo	/* Set up first empty entry in queue's array of Tx/cmd buffers */
2416bb98ecd4SSara Sharon	out_meta = &txq->entries[txq->write_ptr].meta;
2417e705c121SKalle Valo	out_meta->flags = 0;
2418e705c121SKalle Valo
2419e705c121SKalle Valo	/*
2420e705c121SKalle Valo	 * The second TB (tb1) points to the remainder of the TX command
2421e705c121SKalle Valo	 * and the 802.11 header - dword-aligned size
2422e705c121SKalle Valo	 * (This calculation modifies the TX command, so do it before the
2423e705c121SKalle Valo	 * setup of the first TB)
2424e705c121SKalle Valo	 */
2425e705c121SKalle Valo	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
24268de437c7SSara Sharon	      hdr_len - IWL_FIRST_TB_SIZE;
2427c772a3d3SSara Sharon	/* do not align A-MSDU to dword as the subframe header aligns it */
2428c772a3d3SSara Sharon	amsdu = ieee80211_is_data_qos(fc) &&
2429c772a3d3SSara Sharon		(*ieee80211_get_qos_ctl(hdr) &
2430c772a3d3SSara Sharon		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
2431c772a3d3SSara Sharon	if (trans_pcie->sw_csum_tx || !amsdu) {
2432e705c121SKalle Valo		tb1_len = ALIGN(len, 4);
2433e705c121SKalle Valo		/* Tell NIC about any 2-byte padding after MAC header */
2434e705c121SKalle Valo		if (tb1_len != len)
2435e705c121SKalle Valo			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2436c772a3d3SSara Sharon	} else {
2437c772a3d3SSara Sharon		tb1_len = len;
2438c772a3d3SSara Sharon	}
2439e705c121SKalle Valo
244005e5a7e5SJohannes Berg	/*
244105e5a7e5SJohannes Berg	 * The first TB points to bi-directional DMA data; we'll
244205e5a7e5SJohannes Berg	 * memcpy the data into it later.
244305e5a7e5SJohannes Berg	 */
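	/* TB0 comes from the queue's own first_tb_bufs area, so unlike TB1
	 * below it needs no dma_map_single() call here.
	 */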
2444e705c121SKalle Valo	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
24458de437c7SSara Sharon			       IWL_FIRST_TB_SIZE, true);
2446e705c121SKalle Valo
2447e705c121SKalle Valo	/* there must be data left over for TB1 or this code must be changed */
24488de437c7SSara Sharon	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
2449e705c121SKalle Valo
2450e705c121SKalle Valo	/* map the data for TB1 */
24518de437c7SSara Sharon	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
2452e705c121SKalle Valo	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2453e705c121SKalle Valo	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2454e705c121SKalle Valo		goto out_err;
2455e705c121SKalle Valo	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2456e705c121SKalle Valo
2457c772a3d3SSara Sharon	if (amsdu) {
24586eb5e529SEmmanuel Grumbach		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
24596eb5e529SEmmanuel Grumbach						     out_meta, dev_cmd,
24606eb5e529SEmmanuel Grumbach						     tb1_len)))
2461e705c121SKalle Valo			goto out_err;
24626eb5e529SEmmanuel Grumbach	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
24636eb5e529SEmmanuel Grumbach					      out_meta, dev_cmd, tb1_len))) {
24646eb5e529SEmmanuel Grumbach		goto out_err;
24656eb5e529SEmmanuel Grumbach	}
2466e705c121SKalle Valo
246705e5a7e5SJohannes Berg	/* building the A-MSDU might have changed this data, so memcpy it now */
246805e5a7e5SJohannes Berg	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
246905e5a7e5SJohannes Berg	       IWL_FIRST_TB_SIZE);
247005e5a7e5SJohannes Berg
2471bb98ecd4SSara Sharon	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
2472e705c121SKalle Valo	/* Set up entry for this TFD in Tx byte-count array */
24734fe10bc6SSara Sharon	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
24744fe10bc6SSara Sharon					 iwl_pcie_tfd_get_num_tbs(trans, tfd));
2475e705c121SKalle Valo
2476e705c121SKalle Valo	wait_write_ptr = ieee80211_has_morefrags(fc);
2477e705c121SKalle Valo
2478e705c121SKalle Valo	/* start timer if queue currently empty */
2479bb98ecd4SSara Sharon	if (txq->read_ptr == txq->write_ptr) {
2480e705c121SKalle Valo		if (txq->wd_timeout) {
2481e705c121SKalle Valo			/*
2482e705c121SKalle Valo			 * If the TXQ is active, arm the timer; if not, store
2483e705c121SKalle Valo			 * the timeout in frozen_expiry_remainder so that the
2484e705c121SKalle Valo			 * timer is armed with the right value when the
2485e705c121SKalle Valo			 * station wakes up.
2486e705c121SKalle Valo			 */
2487e705c121SKalle Valo			if (!txq->frozen)
2488e705c121SKalle Valo				mod_timer(&txq->stuck_timer,
2489e705c121SKalle Valo					  jiffies + txq->wd_timeout);
2490e705c121SKalle Valo			else
2491e705c121SKalle Valo				txq->frozen_expiry_remainder = txq->wd_timeout;
2492e705c121SKalle Valo		}
2493bb98ecd4SSara Sharon		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
2494c24c7f58SLuca Coelho		iwl_trans_ref(trans);
2495e705c121SKalle Valo	}
2496e705c121SKalle Valo
2497e705c121SKalle Valo	/* Tell device the write index *just past* this latest filled TFD */
2498bb98ecd4SSara Sharon	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
2499e705c121SKalle Valo	if (!wait_write_ptr)
2500e705c121SKalle Valo		iwl_pcie_txq_inc_wr_ptr(trans, txq);
2501e705c121SKalle Valo
2502e705c121SKalle Valo	/*
2503e705c121SKalle Valo	 * At this point the frame is "transmitted" successfully
2504e705c121SKalle Valo	 * and we will get a TX status notification eventually.
2505e705c121SKalle Valo */ 2506e705c121SKalle Valo spin_unlock(&txq->lock); 2507e705c121SKalle Valo return 0; 2508e705c121SKalle Valo out_err: 2509e705c121SKalle Valo spin_unlock(&txq->lock); 2510e705c121SKalle Valo return -1; 2511e705c121SKalle Valo } 2512
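/*
 * Illustrative sketch only (not part of the driver): an op-mode normally
 * reaches iwl_trans_pcie_tx() through the transport API rather than calling
 * it directly. The "priv" variable and its fields are hypothetical; the
 * convention shown is that a non-zero return means the frame was not queued
 * and still belongs to the caller.
 *
 *	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
 *		ieee80211_free_txskb(priv->hw, skb);
 */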