1e705c121SKalle Valo /****************************************************************************** 2e705c121SKalle Valo * 3e705c121SKalle Valo * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4e705c121SKalle Valo * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 5e705c121SKalle Valo * 6e705c121SKalle Valo * Portions of this file are derived from the ipw3945 project, as well 7e705c121SKalle Valo * as portions of the ieee80211 subsystem header files. 8e705c121SKalle Valo * 9e705c121SKalle Valo * This program is free software; you can redistribute it and/or modify it 10e705c121SKalle Valo * under the terms of version 2 of the GNU General Public License as 11e705c121SKalle Valo * published by the Free Software Foundation. 12e705c121SKalle Valo * 13e705c121SKalle Valo * This program is distributed in the hope that it will be useful, but WITHOUT 14e705c121SKalle Valo * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15e705c121SKalle Valo * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16e705c121SKalle Valo * more details. 17e705c121SKalle Valo * 18e705c121SKalle Valo * You should have received a copy of the GNU General Public License along with 19e705c121SKalle Valo * this program; if not, write to the Free Software Foundation, Inc., 20e705c121SKalle Valo * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 21e705c121SKalle Valo * 22e705c121SKalle Valo * The full GNU General Public License is included in this distribution in the 23e705c121SKalle Valo * file called LICENSE. 24e705c121SKalle Valo * 25e705c121SKalle Valo * Contact Information: 26e705c121SKalle Valo * Intel Linux Wireless <ilw@linux.intel.com> 27e705c121SKalle Valo * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 28e705c121SKalle Valo * 29e705c121SKalle Valo *****************************************************************************/ 30e705c121SKalle Valo #include <linux/etherdevice.h> 31e705c121SKalle Valo #include <linux/slab.h> 32e705c121SKalle Valo #include <linux/sched.h> 33e705c121SKalle Valo 34e705c121SKalle Valo #include "iwl-debug.h" 35e705c121SKalle Valo #include "iwl-csr.h" 36e705c121SKalle Valo #include "iwl-prph.h" 37e705c121SKalle Valo #include "iwl-io.h" 38e705c121SKalle Valo #include "iwl-scd.h" 39e705c121SKalle Valo #include "iwl-op-mode.h" 40e705c121SKalle Valo #include "internal.h" 41e705c121SKalle Valo /* FIXME: need to abstract out TX command (once we know what it looks like) */ 42e705c121SKalle Valo #include "dvm/commands.h" 43e705c121SKalle Valo 44e705c121SKalle Valo #define IWL_TX_CRC_SIZE 4 45e705c121SKalle Valo #define IWL_TX_DELIMITER_SIZE 4 46e705c121SKalle Valo 47e705c121SKalle Valo /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 48e705c121SKalle Valo * DMA services 49e705c121SKalle Valo * 50e705c121SKalle Valo * Theory of operation 51e705c121SKalle Valo * 52e705c121SKalle Valo * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 53e705c121SKalle Valo * of buffer descriptors, each of which points to one or more data buffers for 54e705c121SKalle Valo * the device to read from or fill. Driver and device exchange status of each 55e705c121SKalle Valo * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 56e705c121SKalle Valo * entries in each circular buffer, to protect against confusing empty and full 57e705c121SKalle Valo * queue states. 58e705c121SKalle Valo * 59e705c121SKalle Valo * The device reads or writes the data in the queues via the device's several 60e705c121SKalle Valo * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 61e705c121SKalle Valo * 62e705c121SKalle Valo * For Tx queue, there are low mark and high mark limits. If, after queuing 63e705c121SKalle Valo * the packet for Tx, free space become < low mark, Tx queue stopped. When 64e705c121SKalle Valo * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 65e705c121SKalle Valo * Tx queue resumed. 66e705c121SKalle Valo * 67e705c121SKalle Valo ***************************************************/ 68e705c121SKalle Valo static int iwl_queue_space(const struct iwl_queue *q) 69e705c121SKalle Valo { 70e705c121SKalle Valo unsigned int max; 71e705c121SKalle Valo unsigned int used; 72e705c121SKalle Valo 73e705c121SKalle Valo /* 74e705c121SKalle Valo * To avoid ambiguity between empty and completely full queues, there 75e705c121SKalle Valo * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue. 76e705c121SKalle Valo * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need 77e705c121SKalle Valo * to reserve any queue entries for this purpose. 78e705c121SKalle Valo */ 79e705c121SKalle Valo if (q->n_window < TFD_QUEUE_SIZE_MAX) 80e705c121SKalle Valo max = q->n_window; 81e705c121SKalle Valo else 82e705c121SKalle Valo max = TFD_QUEUE_SIZE_MAX - 1; 83e705c121SKalle Valo 84e705c121SKalle Valo /* 85e705c121SKalle Valo * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to 86e705c121SKalle Valo * modulo by TFD_QUEUE_SIZE_MAX and is well defined. 87e705c121SKalle Valo */ 88e705c121SKalle Valo used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); 89e705c121SKalle Valo 90e705c121SKalle Valo if (WARN_ON(used > max)) 91e705c121SKalle Valo return 0; 92e705c121SKalle Valo 93e705c121SKalle Valo return max - used; 94e705c121SKalle Valo } 95e705c121SKalle Valo 96e705c121SKalle Valo /* 97e705c121SKalle Valo * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 98e705c121SKalle Valo */ 99e705c121SKalle Valo static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id) 100e705c121SKalle Valo { 101e705c121SKalle Valo q->n_window = slots_num; 102e705c121SKalle Valo q->id = id; 103e705c121SKalle Valo 104e705c121SKalle Valo /* slots_num must be power-of-two size, otherwise 105e705c121SKalle Valo * get_cmd_index is broken. */ 106e705c121SKalle Valo if (WARN_ON(!is_power_of_2(slots_num))) 107e705c121SKalle Valo return -EINVAL; 108e705c121SKalle Valo 109e705c121SKalle Valo q->low_mark = q->n_window / 4; 110e705c121SKalle Valo if (q->low_mark < 4) 111e705c121SKalle Valo q->low_mark = 4; 112e705c121SKalle Valo 113e705c121SKalle Valo q->high_mark = q->n_window / 8; 114e705c121SKalle Valo if (q->high_mark < 2) 115e705c121SKalle Valo q->high_mark = 2; 116e705c121SKalle Valo 117e705c121SKalle Valo q->write_ptr = 0; 118e705c121SKalle Valo q->read_ptr = 0; 119e705c121SKalle Valo 120e705c121SKalle Valo return 0; 121e705c121SKalle Valo } 122e705c121SKalle Valo 123e705c121SKalle Valo static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, 124e705c121SKalle Valo struct iwl_dma_ptr *ptr, size_t size) 125e705c121SKalle Valo { 126e705c121SKalle Valo if (WARN_ON(ptr->addr)) 127e705c121SKalle Valo return -EINVAL; 128e705c121SKalle Valo 129e705c121SKalle Valo ptr->addr = dma_alloc_coherent(trans->dev, size, 130e705c121SKalle Valo &ptr->dma, GFP_KERNEL); 131e705c121SKalle Valo if (!ptr->addr) 132e705c121SKalle Valo return -ENOMEM; 133e705c121SKalle Valo ptr->size = size; 134e705c121SKalle Valo return 0; 135e705c121SKalle Valo } 136e705c121SKalle Valo 137e705c121SKalle Valo static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, 138e705c121SKalle Valo struct iwl_dma_ptr *ptr) 139e705c121SKalle Valo { 140e705c121SKalle Valo if (unlikely(!ptr->addr)) 141e705c121SKalle Valo return; 142e705c121SKalle Valo 143e705c121SKalle Valo dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); 144e705c121SKalle Valo memset(ptr, 0, sizeof(*ptr)); 145e705c121SKalle Valo } 146e705c121SKalle Valo 147e705c121SKalle Valo static void iwl_pcie_txq_stuck_timer(unsigned long data) 148e705c121SKalle Valo { 149e705c121SKalle Valo struct iwl_txq *txq = (void *)data; 150e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 151e705c121SKalle Valo struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 152e705c121SKalle Valo u32 scd_sram_addr = trans_pcie->scd_base_addr + 153e705c121SKalle Valo SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); 154e705c121SKalle Valo u8 buf[16]; 155e705c121SKalle Valo int i; 156e705c121SKalle Valo 157e705c121SKalle Valo spin_lock(&txq->lock); 158e705c121SKalle Valo /* check if triggered erroneously */ 159e705c121SKalle Valo if (txq->q.read_ptr == txq->q.write_ptr) { 160e705c121SKalle Valo spin_unlock(&txq->lock); 161e705c121SKalle Valo return; 162e705c121SKalle Valo } 163e705c121SKalle Valo spin_unlock(&txq->lock); 164e705c121SKalle Valo 165e705c121SKalle Valo IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, 166e705c121SKalle Valo jiffies_to_msecs(txq->wd_timeout)); 167e705c121SKalle Valo IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", 168e705c121SKalle Valo txq->q.read_ptr, txq->q.write_ptr); 169e705c121SKalle Valo 170e705c121SKalle Valo iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); 171e705c121SKalle Valo 172e705c121SKalle Valo iwl_print_hex_error(trans, buf, sizeof(buf)); 173e705c121SKalle Valo 174e705c121SKalle Valo for (i = 0; i < FH_TCSR_CHNL_NUM; i++) 175e705c121SKalle Valo IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, 176e705c121SKalle Valo iwl_read_direct32(trans, FH_TX_TRB_REG(i))); 177e705c121SKalle Valo 178e705c121SKalle Valo for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 179e705c121SKalle Valo u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); 180e705c121SKalle Valo u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 181e705c121SKalle Valo bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 182e705c121SKalle Valo u32 tbl_dw = 183e705c121SKalle Valo iwl_trans_read_mem32(trans, 184e705c121SKalle Valo trans_pcie->scd_base_addr + 185e705c121SKalle Valo SCD_TRANS_TBL_OFFSET_QUEUE(i)); 186e705c121SKalle Valo 187e705c121SKalle Valo if (i & 0x1) 188e705c121SKalle Valo tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; 189e705c121SKalle Valo else 190e705c121SKalle Valo tbl_dw = tbl_dw & 0x0000FFFF; 191e705c121SKalle Valo 192e705c121SKalle Valo IWL_ERR(trans, 193e705c121SKalle Valo "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", 194e705c121SKalle Valo i, active ? "" : "in", fifo, tbl_dw, 195e705c121SKalle Valo iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & 196e705c121SKalle Valo (TFD_QUEUE_SIZE_MAX - 1), 197e705c121SKalle Valo iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); 198e705c121SKalle Valo } 199e705c121SKalle Valo 200e705c121SKalle Valo iwl_force_nmi(trans); 201e705c121SKalle Valo } 202e705c121SKalle Valo 203e705c121SKalle Valo /* 204e705c121SKalle Valo * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 205e705c121SKalle Valo */ 206e705c121SKalle Valo static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 207e705c121SKalle Valo struct iwl_txq *txq, u16 byte_cnt) 208e705c121SKalle Valo { 209e705c121SKalle Valo struct iwlagn_scd_bc_tbl *scd_bc_tbl; 210e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 211e705c121SKalle Valo int write_ptr = txq->q.write_ptr; 212e705c121SKalle Valo int txq_id = txq->q.id; 213e705c121SKalle Valo u8 sec_ctl = 0; 214e705c121SKalle Valo u8 sta_id = 0; 215e705c121SKalle Valo u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 216e705c121SKalle Valo __le16 bc_ent; 217e705c121SKalle Valo struct iwl_tx_cmd *tx_cmd = 218e705c121SKalle Valo (void *) txq->entries[txq->q.write_ptr].cmd->payload; 219e705c121SKalle Valo 220e705c121SKalle Valo scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 221e705c121SKalle Valo 222e705c121SKalle Valo sta_id = tx_cmd->sta_id; 223e705c121SKalle Valo sec_ctl = tx_cmd->sec_ctl; 224e705c121SKalle Valo 225e705c121SKalle Valo switch (sec_ctl & TX_CMD_SEC_MSK) { 226e705c121SKalle Valo case TX_CMD_SEC_CCM: 227e705c121SKalle Valo len += IEEE80211_CCMP_MIC_LEN; 228e705c121SKalle Valo break; 229e705c121SKalle Valo case TX_CMD_SEC_TKIP: 230e705c121SKalle Valo len += IEEE80211_TKIP_ICV_LEN; 231e705c121SKalle Valo break; 232e705c121SKalle Valo case TX_CMD_SEC_WEP: 233e705c121SKalle Valo len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; 234e705c121SKalle Valo break; 235e705c121SKalle Valo } 236e705c121SKalle Valo 237e705c121SKalle Valo if (trans_pcie->bc_table_dword) 238e705c121SKalle Valo len = DIV_ROUND_UP(len, 4); 239e705c121SKalle Valo 240e705c121SKalle Valo if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) 241e705c121SKalle Valo return; 242e705c121SKalle Valo 243e705c121SKalle Valo bc_ent = cpu_to_le16(len | (sta_id << 12)); 244e705c121SKalle Valo 245e705c121SKalle Valo scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 246e705c121SKalle Valo 247e705c121SKalle Valo if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 248e705c121SKalle Valo scd_bc_tbl[txq_id]. 249e705c121SKalle Valo tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 250e705c121SKalle Valo } 251e705c121SKalle Valo 252e705c121SKalle Valo static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, 253e705c121SKalle Valo struct iwl_txq *txq) 254e705c121SKalle Valo { 255e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = 256e705c121SKalle Valo IWL_TRANS_GET_PCIE_TRANS(trans); 257e705c121SKalle Valo struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 258e705c121SKalle Valo int txq_id = txq->q.id; 259e705c121SKalle Valo int read_ptr = txq->q.read_ptr; 260e705c121SKalle Valo u8 sta_id = 0; 261e705c121SKalle Valo __le16 bc_ent; 262e705c121SKalle Valo struct iwl_tx_cmd *tx_cmd = 263e705c121SKalle Valo (void *)txq->entries[txq->q.read_ptr].cmd->payload; 264e705c121SKalle Valo 265e705c121SKalle Valo WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 266e705c121SKalle Valo 267e705c121SKalle Valo if (txq_id != trans_pcie->cmd_queue) 268e705c121SKalle Valo sta_id = tx_cmd->sta_id; 269e705c121SKalle Valo 270e705c121SKalle Valo bc_ent = cpu_to_le16(1 | (sta_id << 12)); 271e705c121SKalle Valo scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 272e705c121SKalle Valo 273e705c121SKalle Valo if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 274e705c121SKalle Valo scd_bc_tbl[txq_id]. 275e705c121SKalle Valo tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 276e705c121SKalle Valo } 277e705c121SKalle Valo 278e705c121SKalle Valo /* 279e705c121SKalle Valo * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 280e705c121SKalle Valo */ 281e705c121SKalle Valo static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, 282e705c121SKalle Valo struct iwl_txq *txq) 283e705c121SKalle Valo { 284e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 285e705c121SKalle Valo u32 reg = 0; 286e705c121SKalle Valo int txq_id = txq->q.id; 287e705c121SKalle Valo 288e705c121SKalle Valo lockdep_assert_held(&txq->lock); 289e705c121SKalle Valo 290e705c121SKalle Valo /* 291e705c121SKalle Valo * explicitly wake up the NIC if: 292e705c121SKalle Valo * 1. shadow registers aren't enabled 293e705c121SKalle Valo * 2. NIC is woken up for CMD regardless of shadow outside this function 294e705c121SKalle Valo * 3. there is a chance that the NIC is asleep 295e705c121SKalle Valo */ 296e705c121SKalle Valo if (!trans->cfg->base_params->shadow_reg_enable && 297e705c121SKalle Valo txq_id != trans_pcie->cmd_queue && 298e705c121SKalle Valo test_bit(STATUS_TPOWER_PMI, &trans->status)) { 299e705c121SKalle Valo /* 300e705c121SKalle Valo * wake up nic if it's powered down ... 301e705c121SKalle Valo * uCode will wake up, and interrupt us again, so next 302e705c121SKalle Valo * time we'll skip this part. 303e705c121SKalle Valo */ 304e705c121SKalle Valo reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 305e705c121SKalle Valo 306e705c121SKalle Valo if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 307e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", 308e705c121SKalle Valo txq_id, reg); 309e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, 310e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 311e705c121SKalle Valo txq->need_update = true; 312e705c121SKalle Valo return; 313e705c121SKalle Valo } 314e705c121SKalle Valo } 315e705c121SKalle Valo 316e705c121SKalle Valo /* 317e705c121SKalle Valo * if not in power-save mode, uCode will never sleep when we're 318e705c121SKalle Valo * trying to tx (during RFKILL, we're not trying to tx). 319e705c121SKalle Valo */ 320e705c121SKalle Valo IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); 3210cd58eaaSEmmanuel Grumbach if (!txq->block) 3220cd58eaaSEmmanuel Grumbach iwl_write32(trans, HBUS_TARG_WRPTR, 3230cd58eaaSEmmanuel Grumbach txq->q.write_ptr | (txq_id << 8)); 324e705c121SKalle Valo } 325e705c121SKalle Valo 326e705c121SKalle Valo void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 327e705c121SKalle Valo { 328e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 329e705c121SKalle Valo int i; 330e705c121SKalle Valo 331e705c121SKalle Valo for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 332e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[i]; 333e705c121SKalle Valo 334e705c121SKalle Valo spin_lock_bh(&txq->lock); 335e705c121SKalle Valo if (trans_pcie->txq[i].need_update) { 336e705c121SKalle Valo iwl_pcie_txq_inc_wr_ptr(trans, txq); 337e705c121SKalle Valo trans_pcie->txq[i].need_update = false; 338e705c121SKalle Valo } 339e705c121SKalle Valo spin_unlock_bh(&txq->lock); 340e705c121SKalle Valo } 341e705c121SKalle Valo } 342e705c121SKalle Valo 343e705c121SKalle Valo static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) 344e705c121SKalle Valo { 345e705c121SKalle Valo struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 346e705c121SKalle Valo 347e705c121SKalle Valo dma_addr_t addr = get_unaligned_le32(&tb->lo); 348e705c121SKalle Valo if (sizeof(dma_addr_t) > sizeof(u32)) 349e705c121SKalle Valo addr |= 350e705c121SKalle Valo ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; 351e705c121SKalle Valo 352e705c121SKalle Valo return addr; 353e705c121SKalle Valo } 354e705c121SKalle Valo 355e705c121SKalle Valo static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, 356e705c121SKalle Valo dma_addr_t addr, u16 len) 357e705c121SKalle Valo { 358e705c121SKalle Valo struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 359e705c121SKalle Valo u16 hi_n_len = len << 4; 360e705c121SKalle Valo 361e705c121SKalle Valo put_unaligned_le32(addr, &tb->lo); 362e705c121SKalle Valo if (sizeof(dma_addr_t) > sizeof(u32)) 363e705c121SKalle Valo hi_n_len |= ((addr >> 16) >> 16) & 0xF; 364e705c121SKalle Valo 365e705c121SKalle Valo tb->hi_n_len = cpu_to_le16(hi_n_len); 366e705c121SKalle Valo 367e705c121SKalle Valo tfd->num_tbs = idx + 1; 368e705c121SKalle Valo } 369e705c121SKalle Valo 370e705c121SKalle Valo static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) 371e705c121SKalle Valo { 372e705c121SKalle Valo return tfd->num_tbs & 0x1f; 373e705c121SKalle Valo } 374e705c121SKalle Valo 375e705c121SKalle Valo static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 376e705c121SKalle Valo struct iwl_cmd_meta *meta, 377e705c121SKalle Valo struct iwl_tfd *tfd) 378e705c121SKalle Valo { 379e705c121SKalle Valo int i; 380e705c121SKalle Valo int num_tbs; 381e705c121SKalle Valo 382e705c121SKalle Valo /* Sanity check on number of chunks */ 383e705c121SKalle Valo num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); 384e705c121SKalle Valo 385e705c121SKalle Valo if (num_tbs >= IWL_NUM_OF_TBS) { 386e705c121SKalle Valo IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 387e705c121SKalle Valo /* @todo issue fatal error, it is quite serious situation */ 388e705c121SKalle Valo return; 389e705c121SKalle Valo } 390e705c121SKalle Valo 391e705c121SKalle Valo /* first TB is never freed - it's the scratchbuf data */ 392e705c121SKalle Valo 393e705c121SKalle Valo for (i = 1; i < num_tbs; i++) { 394e705c121SKalle Valo if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) 395e705c121SKalle Valo dma_unmap_page(trans->dev, 396e705c121SKalle Valo iwl_pcie_tfd_tb_get_addr(tfd, i), 397e705c121SKalle Valo iwl_pcie_tfd_tb_get_len(tfd, i), 398e705c121SKalle Valo DMA_TO_DEVICE); 399e705c121SKalle Valo else 400e705c121SKalle Valo dma_unmap_single(trans->dev, 401e705c121SKalle Valo iwl_pcie_tfd_tb_get_addr(tfd, i), 402e705c121SKalle Valo iwl_pcie_tfd_tb_get_len(tfd, i), 403e705c121SKalle Valo DMA_TO_DEVICE); 404e705c121SKalle Valo } 405e705c121SKalle Valo tfd->num_tbs = 0; 406e705c121SKalle Valo } 407e705c121SKalle Valo 408e705c121SKalle Valo /* 409e705c121SKalle Valo * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 410e705c121SKalle Valo * @trans - transport private data 411e705c121SKalle Valo * @txq - tx queue 412e705c121SKalle Valo * @dma_dir - the direction of the DMA mapping 413e705c121SKalle Valo * 414e705c121SKalle Valo * Does NOT advance any TFD circular buffer read/write indexes 415e705c121SKalle Valo * Does NOT free the TFD itself (which is within circular buffer) 416e705c121SKalle Valo */ 417e705c121SKalle Valo static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 418e705c121SKalle Valo { 419e705c121SKalle Valo struct iwl_tfd *tfd_tmp = txq->tfds; 420e705c121SKalle Valo 421e705c121SKalle Valo /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 422e705c121SKalle Valo * idx is bounded by n_window 423e705c121SKalle Valo */ 424e705c121SKalle Valo int rd_ptr = txq->q.read_ptr; 425e705c121SKalle Valo int idx = get_cmd_index(&txq->q, rd_ptr); 426e705c121SKalle Valo 427e705c121SKalle Valo lockdep_assert_held(&txq->lock); 428e705c121SKalle Valo 429e705c121SKalle Valo /* We have only q->n_window txq->entries, but we use 430e705c121SKalle Valo * TFD_QUEUE_SIZE_MAX tfds 431e705c121SKalle Valo */ 432e705c121SKalle Valo iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); 433e705c121SKalle Valo 434e705c121SKalle Valo /* free SKB */ 435e705c121SKalle Valo if (txq->entries) { 436e705c121SKalle Valo struct sk_buff *skb; 437e705c121SKalle Valo 438e705c121SKalle Valo skb = txq->entries[idx].skb; 439e705c121SKalle Valo 440e705c121SKalle Valo /* Can be called from irqs-disabled context 441e705c121SKalle Valo * If skb is not NULL, it means that the whole queue is being 442e705c121SKalle Valo * freed and that the queue is not empty - free the skb 443e705c121SKalle Valo */ 444e705c121SKalle Valo if (skb) { 445e705c121SKalle Valo iwl_op_mode_free_skb(trans->op_mode, skb); 446e705c121SKalle Valo txq->entries[idx].skb = NULL; 447e705c121SKalle Valo } 448e705c121SKalle Valo } 449e705c121SKalle Valo } 450e705c121SKalle Valo 451e705c121SKalle Valo static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 452e705c121SKalle Valo dma_addr_t addr, u16 len, bool reset) 453e705c121SKalle Valo { 454e705c121SKalle Valo struct iwl_queue *q; 455e705c121SKalle Valo struct iwl_tfd *tfd, *tfd_tmp; 456e705c121SKalle Valo u32 num_tbs; 457e705c121SKalle Valo 458e705c121SKalle Valo q = &txq->q; 459e705c121SKalle Valo tfd_tmp = txq->tfds; 460e705c121SKalle Valo tfd = &tfd_tmp[q->write_ptr]; 461e705c121SKalle Valo 462e705c121SKalle Valo if (reset) 463e705c121SKalle Valo memset(tfd, 0, sizeof(*tfd)); 464e705c121SKalle Valo 465e705c121SKalle Valo num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); 466e705c121SKalle Valo 467e705c121SKalle Valo /* Each TFD can point to a maximum 20 Tx buffers */ 468e705c121SKalle Valo if (num_tbs >= IWL_NUM_OF_TBS) { 469e705c121SKalle Valo IWL_ERR(trans, "Error can not send more than %d chunks\n", 470e705c121SKalle Valo IWL_NUM_OF_TBS); 471e705c121SKalle Valo return -EINVAL; 472e705c121SKalle Valo } 473e705c121SKalle Valo 474e705c121SKalle Valo if (WARN(addr & ~IWL_TX_DMA_MASK, 475e705c121SKalle Valo "Unaligned address = %llx\n", (unsigned long long)addr)) 476e705c121SKalle Valo return -EINVAL; 477e705c121SKalle Valo 478e705c121SKalle Valo iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); 479e705c121SKalle Valo 480e705c121SKalle Valo return num_tbs; 481e705c121SKalle Valo } 482e705c121SKalle Valo 483e705c121SKalle Valo static int iwl_pcie_txq_alloc(struct iwl_trans *trans, 484e705c121SKalle Valo struct iwl_txq *txq, int slots_num, 485e705c121SKalle Valo u32 txq_id) 486e705c121SKalle Valo { 487e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 488e705c121SKalle Valo size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; 489e705c121SKalle Valo size_t scratchbuf_sz; 490e705c121SKalle Valo int i; 491e705c121SKalle Valo 492e705c121SKalle Valo if (WARN_ON(txq->entries || txq->tfds)) 493e705c121SKalle Valo return -EINVAL; 494e705c121SKalle Valo 495e705c121SKalle Valo setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 496e705c121SKalle Valo (unsigned long)txq); 497e705c121SKalle Valo txq->trans_pcie = trans_pcie; 498e705c121SKalle Valo 499e705c121SKalle Valo txq->q.n_window = slots_num; 500e705c121SKalle Valo 501e705c121SKalle Valo txq->entries = kcalloc(slots_num, 502e705c121SKalle Valo sizeof(struct iwl_pcie_txq_entry), 503e705c121SKalle Valo GFP_KERNEL); 504e705c121SKalle Valo 505e705c121SKalle Valo if (!txq->entries) 506e705c121SKalle Valo goto error; 507e705c121SKalle Valo 508e705c121SKalle Valo if (txq_id == trans_pcie->cmd_queue) 509e705c121SKalle Valo for (i = 0; i < slots_num; i++) { 510e705c121SKalle Valo txq->entries[i].cmd = 511e705c121SKalle Valo kmalloc(sizeof(struct iwl_device_cmd), 512e705c121SKalle Valo GFP_KERNEL); 513e705c121SKalle Valo if (!txq->entries[i].cmd) 514e705c121SKalle Valo goto error; 515e705c121SKalle Valo } 516e705c121SKalle Valo 517e705c121SKalle Valo /* Circular buffer of transmit frame descriptors (TFDs), 518e705c121SKalle Valo * shared with device */ 519e705c121SKalle Valo txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 520e705c121SKalle Valo &txq->q.dma_addr, GFP_KERNEL); 521e705c121SKalle Valo if (!txq->tfds) 522e705c121SKalle Valo goto error; 523e705c121SKalle Valo 524e705c121SKalle Valo BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); 525e705c121SKalle Valo BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != 526e705c121SKalle Valo sizeof(struct iwl_cmd_header) + 527e705c121SKalle Valo offsetof(struct iwl_tx_cmd, scratch)); 528e705c121SKalle Valo 529e705c121SKalle Valo scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; 530e705c121SKalle Valo 531e705c121SKalle Valo txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, 532e705c121SKalle Valo &txq->scratchbufs_dma, 533e705c121SKalle Valo GFP_KERNEL); 534e705c121SKalle Valo if (!txq->scratchbufs) 535e705c121SKalle Valo goto err_free_tfds; 536e705c121SKalle Valo 537e705c121SKalle Valo txq->q.id = txq_id; 538e705c121SKalle Valo 539e705c121SKalle Valo return 0; 540e705c121SKalle Valo err_free_tfds: 541e705c121SKalle Valo dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr); 542e705c121SKalle Valo error: 543e705c121SKalle Valo if (txq->entries && txq_id == trans_pcie->cmd_queue) 544e705c121SKalle Valo for (i = 0; i < slots_num; i++) 545e705c121SKalle Valo kfree(txq->entries[i].cmd); 546e705c121SKalle Valo kfree(txq->entries); 547e705c121SKalle Valo txq->entries = NULL; 548e705c121SKalle Valo 549e705c121SKalle Valo return -ENOMEM; 550e705c121SKalle Valo 551e705c121SKalle Valo } 552e705c121SKalle Valo 553e705c121SKalle Valo static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 554e705c121SKalle Valo int slots_num, u32 txq_id) 555e705c121SKalle Valo { 556e705c121SKalle Valo int ret; 557e705c121SKalle Valo 558e705c121SKalle Valo txq->need_update = false; 559e705c121SKalle Valo 560e705c121SKalle Valo /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 561e705c121SKalle Valo * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 562e705c121SKalle Valo BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 563e705c121SKalle Valo 564e705c121SKalle Valo /* Initialize queue's high/low-water marks, and head/tail indexes */ 565e705c121SKalle Valo ret = iwl_queue_init(&txq->q, slots_num, txq_id); 566e705c121SKalle Valo if (ret) 567e705c121SKalle Valo return ret; 568e705c121SKalle Valo 569e705c121SKalle Valo spin_lock_init(&txq->lock); 570e705c121SKalle Valo 571e705c121SKalle Valo /* 572e705c121SKalle Valo * Tell nic where to find circular buffer of Tx Frame Descriptors for 573e705c121SKalle Valo * given Tx queue, and enable the DMA channel used for that queue. 574e705c121SKalle Valo * Circular buffer (TFD queue in DRAM) physical base address */ 575e705c121SKalle Valo iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), 576e705c121SKalle Valo txq->q.dma_addr >> 8); 577e705c121SKalle Valo 578e705c121SKalle Valo return 0; 579e705c121SKalle Valo } 580e705c121SKalle Valo 581e705c121SKalle Valo /* 582e705c121SKalle Valo * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 583e705c121SKalle Valo */ 584e705c121SKalle Valo static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 585e705c121SKalle Valo { 586e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 587e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 588e705c121SKalle Valo struct iwl_queue *q = &txq->q; 589e705c121SKalle Valo 590e705c121SKalle Valo spin_lock_bh(&txq->lock); 591e705c121SKalle Valo while (q->write_ptr != q->read_ptr) { 592e705c121SKalle Valo IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 593e705c121SKalle Valo txq_id, q->read_ptr); 594e705c121SKalle Valo iwl_pcie_txq_free_tfd(trans, txq); 595e705c121SKalle Valo q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); 596e705c121SKalle Valo } 597e705c121SKalle Valo txq->active = false; 598e705c121SKalle Valo spin_unlock_bh(&txq->lock); 599e705c121SKalle Valo 600e705c121SKalle Valo /* just in case - this queue may have been stopped */ 601e705c121SKalle Valo iwl_wake_queue(trans, txq); 602e705c121SKalle Valo } 603e705c121SKalle Valo 604e705c121SKalle Valo /* 605e705c121SKalle Valo * iwl_pcie_txq_free - Deallocate DMA queue. 606e705c121SKalle Valo * @txq: Transmit queue to deallocate. 607e705c121SKalle Valo * 608e705c121SKalle Valo * Empty queue by removing and destroying all BD's. 609e705c121SKalle Valo * Free all buffers. 610e705c121SKalle Valo * 0-fill, but do not free "txq" descriptor structure. 611e705c121SKalle Valo */ 612e705c121SKalle Valo static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 613e705c121SKalle Valo { 614e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 615e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 616e705c121SKalle Valo struct device *dev = trans->dev; 617e705c121SKalle Valo int i; 618e705c121SKalle Valo 619e705c121SKalle Valo if (WARN_ON(!txq)) 620e705c121SKalle Valo return; 621e705c121SKalle Valo 622e705c121SKalle Valo iwl_pcie_txq_unmap(trans, txq_id); 623e705c121SKalle Valo 624e705c121SKalle Valo /* De-alloc array of command/tx buffers */ 625e705c121SKalle Valo if (txq_id == trans_pcie->cmd_queue) 626e705c121SKalle Valo for (i = 0; i < txq->q.n_window; i++) { 627e705c121SKalle Valo kzfree(txq->entries[i].cmd); 628e705c121SKalle Valo kzfree(txq->entries[i].free_buf); 629e705c121SKalle Valo } 630e705c121SKalle Valo 631e705c121SKalle Valo /* De-alloc circular buffer of TFDs */ 632e705c121SKalle Valo if (txq->tfds) { 633e705c121SKalle Valo dma_free_coherent(dev, 634e705c121SKalle Valo sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX, 635e705c121SKalle Valo txq->tfds, txq->q.dma_addr); 636e705c121SKalle Valo txq->q.dma_addr = 0; 637e705c121SKalle Valo txq->tfds = NULL; 638e705c121SKalle Valo 639e705c121SKalle Valo dma_free_coherent(dev, 640e705c121SKalle Valo sizeof(*txq->scratchbufs) * txq->q.n_window, 641e705c121SKalle Valo txq->scratchbufs, txq->scratchbufs_dma); 642e705c121SKalle Valo } 643e705c121SKalle Valo 644e705c121SKalle Valo kfree(txq->entries); 645e705c121SKalle Valo txq->entries = NULL; 646e705c121SKalle Valo 647e705c121SKalle Valo del_timer_sync(&txq->stuck_timer); 648e705c121SKalle Valo 649e705c121SKalle Valo /* 0-fill queue descriptor structure */ 650e705c121SKalle Valo memset(txq, 0, sizeof(*txq)); 651e705c121SKalle Valo } 652e705c121SKalle Valo 653e705c121SKalle Valo void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 654e705c121SKalle Valo { 655e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 656e705c121SKalle Valo int nq = trans->cfg->base_params->num_of_queues; 657e705c121SKalle Valo int chan; 658e705c121SKalle Valo u32 reg_val; 659e705c121SKalle Valo int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 660e705c121SKalle Valo SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 661e705c121SKalle Valo 662e705c121SKalle Valo /* make sure all queue are not stopped/used */ 663e705c121SKalle Valo memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 664e705c121SKalle Valo memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 665e705c121SKalle Valo 666e705c121SKalle Valo trans_pcie->scd_base_addr = 667e705c121SKalle Valo iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 668e705c121SKalle Valo 669e705c121SKalle Valo WARN_ON(scd_base_addr != 0 && 670e705c121SKalle Valo scd_base_addr != trans_pcie->scd_base_addr); 671e705c121SKalle Valo 672e705c121SKalle Valo /* reset context data, TX status and translation data */ 673e705c121SKalle Valo iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 674e705c121SKalle Valo SCD_CONTEXT_MEM_LOWER_BOUND, 675e705c121SKalle Valo NULL, clear_dwords); 676e705c121SKalle Valo 677e705c121SKalle Valo iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 678e705c121SKalle Valo trans_pcie->scd_bc_tbls.dma >> 10); 679e705c121SKalle Valo 680e705c121SKalle Valo /* The chain extension of the SCD doesn't work well. This feature is 681e705c121SKalle Valo * enabled by default by the HW, so we need to disable it manually. 682e705c121SKalle Valo */ 683e705c121SKalle Valo if (trans->cfg->base_params->scd_chain_ext_wa) 684e705c121SKalle Valo iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 685e705c121SKalle Valo 686e705c121SKalle Valo iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 687e705c121SKalle Valo trans_pcie->cmd_fifo, 688e705c121SKalle Valo trans_pcie->cmd_q_wdg_timeout); 689e705c121SKalle Valo 690e705c121SKalle Valo /* Activate all Tx DMA/FIFO channels */ 691e705c121SKalle Valo iwl_scd_activate_fifos(trans); 692e705c121SKalle Valo 693e705c121SKalle Valo /* Enable DMA channel */ 694e705c121SKalle Valo for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 695e705c121SKalle Valo iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 696e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 697e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 698e705c121SKalle Valo 699e705c121SKalle Valo /* Update FH chicken bits */ 700e705c121SKalle Valo reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 701e705c121SKalle Valo iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 702e705c121SKalle Valo reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 703e705c121SKalle Valo 704e705c121SKalle Valo /* Enable L1-Active */ 705e705c121SKalle Valo if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) 706e705c121SKalle Valo iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 707e705c121SKalle Valo APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 708e705c121SKalle Valo } 709e705c121SKalle Valo 710e705c121SKalle Valo void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 711e705c121SKalle Valo { 712e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 713e705c121SKalle Valo int txq_id; 714e705c121SKalle Valo 715e705c121SKalle Valo for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 716e705c121SKalle Valo txq_id++) { 717e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 718e705c121SKalle Valo 719e705c121SKalle Valo iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), 720e705c121SKalle Valo txq->q.dma_addr >> 8); 721e705c121SKalle Valo iwl_pcie_txq_unmap(trans, txq_id); 722e705c121SKalle Valo txq->q.read_ptr = 0; 723e705c121SKalle Valo txq->q.write_ptr = 0; 724e705c121SKalle Valo } 725e705c121SKalle Valo 726e705c121SKalle Valo /* Tell NIC where to find the "keep warm" buffer */ 727e705c121SKalle Valo iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 728e705c121SKalle Valo trans_pcie->kw.dma >> 4); 729e705c121SKalle Valo 730e705c121SKalle Valo /* 731e705c121SKalle Valo * Send 0 as the scd_base_addr since the device may have be reset 732e705c121SKalle Valo * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 733e705c121SKalle Valo * contain garbage. 734e705c121SKalle Valo */ 735e705c121SKalle Valo iwl_pcie_tx_start(trans, 0); 736e705c121SKalle Valo } 737e705c121SKalle Valo 738e705c121SKalle Valo static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 739e705c121SKalle Valo { 740e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 741e705c121SKalle Valo unsigned long flags; 742e705c121SKalle Valo int ch, ret; 743e705c121SKalle Valo u32 mask = 0; 744e705c121SKalle Valo 745e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 746e705c121SKalle Valo 747e705c121SKalle Valo if (!iwl_trans_grab_nic_access(trans, false, &flags)) 748e705c121SKalle Valo goto out; 749e705c121SKalle Valo 750e705c121SKalle Valo /* Stop each Tx DMA channel */ 751e705c121SKalle Valo for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 752e705c121SKalle Valo iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 753e705c121SKalle Valo mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 754e705c121SKalle Valo } 755e705c121SKalle Valo 756e705c121SKalle Valo /* Wait for DMA channels to be idle */ 757e705c121SKalle Valo ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 758e705c121SKalle Valo if (ret < 0) 759e705c121SKalle Valo IWL_ERR(trans, 760e705c121SKalle Valo "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 761e705c121SKalle Valo ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 762e705c121SKalle Valo 763e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 764e705c121SKalle Valo 765e705c121SKalle Valo out: 766e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 767e705c121SKalle Valo } 768e705c121SKalle Valo 769e705c121SKalle Valo /* 770e705c121SKalle Valo * iwl_pcie_tx_stop - Stop all Tx DMA channels 771e705c121SKalle Valo */ 772e705c121SKalle Valo int iwl_pcie_tx_stop(struct iwl_trans *trans) 773e705c121SKalle Valo { 774e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 775e705c121SKalle Valo int txq_id; 776e705c121SKalle Valo 777e705c121SKalle Valo /* Turn off all Tx DMA fifos */ 778e705c121SKalle Valo iwl_scd_deactivate_fifos(trans); 779e705c121SKalle Valo 780e705c121SKalle Valo /* Turn off all Tx DMA channels */ 781e705c121SKalle Valo iwl_pcie_tx_stop_fh(trans); 782e705c121SKalle Valo 783e705c121SKalle Valo /* 784e705c121SKalle Valo * This function can be called before the op_mode disabled the 785e705c121SKalle Valo * queues. This happens when we have an rfkill interrupt. 786e705c121SKalle Valo * Since we stop Tx altogether - mark the queues as stopped. 787e705c121SKalle Valo */ 788e705c121SKalle Valo memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 789e705c121SKalle Valo memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 790e705c121SKalle Valo 791e705c121SKalle Valo /* This can happen: start_hw, stop_device */ 792e705c121SKalle Valo if (!trans_pcie->txq) 793e705c121SKalle Valo return 0; 794e705c121SKalle Valo 795e705c121SKalle Valo /* Unmap DMA from host system and free skb's */ 796e705c121SKalle Valo for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 797e705c121SKalle Valo txq_id++) 798e705c121SKalle Valo iwl_pcie_txq_unmap(trans, txq_id); 799e705c121SKalle Valo 800e705c121SKalle Valo return 0; 801e705c121SKalle Valo } 802e705c121SKalle Valo 803e705c121SKalle Valo /* 804e705c121SKalle Valo * iwl_trans_tx_free - Free TXQ Context 805e705c121SKalle Valo * 806e705c121SKalle Valo * Destroy all TX DMA queues and structures 807e705c121SKalle Valo */ 808e705c121SKalle Valo void iwl_pcie_tx_free(struct iwl_trans *trans) 809e705c121SKalle Valo { 810e705c121SKalle Valo int txq_id; 811e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 812e705c121SKalle Valo 813e705c121SKalle Valo /* Tx queues */ 814e705c121SKalle Valo if (trans_pcie->txq) { 815e705c121SKalle Valo for (txq_id = 0; 816e705c121SKalle Valo txq_id < trans->cfg->base_params->num_of_queues; txq_id++) 817e705c121SKalle Valo iwl_pcie_txq_free(trans, txq_id); 818e705c121SKalle Valo } 819e705c121SKalle Valo 820e705c121SKalle Valo kfree(trans_pcie->txq); 821e705c121SKalle Valo trans_pcie->txq = NULL; 822e705c121SKalle Valo 823e705c121SKalle Valo iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 824e705c121SKalle Valo 825e705c121SKalle Valo iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); 826e705c121SKalle Valo } 827e705c121SKalle Valo 828e705c121SKalle Valo /* 829e705c121SKalle Valo * iwl_pcie_tx_alloc - allocate TX context 830e705c121SKalle Valo * Allocate all Tx DMA structures and initialize them 831e705c121SKalle Valo */ 832e705c121SKalle Valo static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 833e705c121SKalle Valo { 834e705c121SKalle Valo int ret; 835e705c121SKalle Valo int txq_id, slots_num; 836e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 837e705c121SKalle Valo 838e705c121SKalle Valo u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * 839e705c121SKalle Valo sizeof(struct iwlagn_scd_bc_tbl); 840e705c121SKalle Valo 841e705c121SKalle Valo /*It is not allowed to alloc twice, so warn when this happens. 842e705c121SKalle Valo * We cannot rely on the previous allocation, so free and fail */ 843e705c121SKalle Valo if (WARN_ON(trans_pcie->txq)) { 844e705c121SKalle Valo ret = -EINVAL; 845e705c121SKalle Valo goto error; 846e705c121SKalle Valo } 847e705c121SKalle Valo 848e705c121SKalle Valo ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, 849e705c121SKalle Valo scd_bc_tbls_size); 850e705c121SKalle Valo if (ret) { 851e705c121SKalle Valo IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 852e705c121SKalle Valo goto error; 853e705c121SKalle Valo } 854e705c121SKalle Valo 855e705c121SKalle Valo /* Alloc keep-warm buffer */ 856e705c121SKalle Valo ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 857e705c121SKalle Valo if (ret) { 858e705c121SKalle Valo IWL_ERR(trans, "Keep Warm allocation failed\n"); 859e705c121SKalle Valo goto error; 860e705c121SKalle Valo } 861e705c121SKalle Valo 862e705c121SKalle Valo trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, 863e705c121SKalle Valo sizeof(struct iwl_txq), GFP_KERNEL); 864e705c121SKalle Valo if (!trans_pcie->txq) { 865e705c121SKalle Valo IWL_ERR(trans, "Not enough memory for txq\n"); 866e705c121SKalle Valo ret = -ENOMEM; 867e705c121SKalle Valo goto error; 868e705c121SKalle Valo } 869e705c121SKalle Valo 870e705c121SKalle Valo /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 871e705c121SKalle Valo for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 872e705c121SKalle Valo txq_id++) { 873e705c121SKalle Valo slots_num = (txq_id == trans_pcie->cmd_queue) ? 874e705c121SKalle Valo TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 875e705c121SKalle Valo ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], 876e705c121SKalle Valo slots_num, txq_id); 877e705c121SKalle Valo if (ret) { 878e705c121SKalle Valo IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 879e705c121SKalle Valo goto error; 880e705c121SKalle Valo } 881e705c121SKalle Valo } 882e705c121SKalle Valo 883e705c121SKalle Valo return 0; 884e705c121SKalle Valo 885e705c121SKalle Valo error: 886e705c121SKalle Valo iwl_pcie_tx_free(trans); 887e705c121SKalle Valo 888e705c121SKalle Valo return ret; 889e705c121SKalle Valo } 890e705c121SKalle Valo int iwl_pcie_tx_init(struct iwl_trans *trans) 891e705c121SKalle Valo { 892e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 893e705c121SKalle Valo int ret; 894e705c121SKalle Valo int txq_id, slots_num; 895e705c121SKalle Valo bool alloc = false; 896e705c121SKalle Valo 897e705c121SKalle Valo if (!trans_pcie->txq) { 898e705c121SKalle Valo ret = iwl_pcie_tx_alloc(trans); 899e705c121SKalle Valo if (ret) 900e705c121SKalle Valo goto error; 901e705c121SKalle Valo alloc = true; 902e705c121SKalle Valo } 903e705c121SKalle Valo 904e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 905e705c121SKalle Valo 906e705c121SKalle Valo /* Turn off all Tx DMA fifos */ 907e705c121SKalle Valo iwl_scd_deactivate_fifos(trans); 908e705c121SKalle Valo 909e705c121SKalle Valo /* Tell NIC where to find the "keep warm" buffer */ 910e705c121SKalle Valo iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 911e705c121SKalle Valo trans_pcie->kw.dma >> 4); 912e705c121SKalle Valo 913e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 914e705c121SKalle Valo 915e705c121SKalle Valo /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 916e705c121SKalle Valo for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 917e705c121SKalle Valo txq_id++) { 918e705c121SKalle Valo slots_num = (txq_id == trans_pcie->cmd_queue) ? 919e705c121SKalle Valo TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 920e705c121SKalle Valo ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], 921e705c121SKalle Valo slots_num, txq_id); 922e705c121SKalle Valo if (ret) { 923e705c121SKalle Valo IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 924e705c121SKalle Valo goto error; 925e705c121SKalle Valo } 926e705c121SKalle Valo } 927e705c121SKalle Valo 928e705c121SKalle Valo iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); 929e705c121SKalle Valo if (trans->cfg->base_params->num_of_queues > 20) 930e705c121SKalle Valo iwl_set_bits_prph(trans, SCD_GP_CTRL, 931e705c121SKalle Valo SCD_GP_CTRL_ENABLE_31_QUEUES); 932e705c121SKalle Valo 933e705c121SKalle Valo return 0; 934e705c121SKalle Valo error: 935e705c121SKalle Valo /*Upon error, free only if we allocated something */ 936e705c121SKalle Valo if (alloc) 937e705c121SKalle Valo iwl_pcie_tx_free(trans); 938e705c121SKalle Valo return ret; 939e705c121SKalle Valo } 940e705c121SKalle Valo 941e705c121SKalle Valo static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 942e705c121SKalle Valo { 943e705c121SKalle Valo lockdep_assert_held(&txq->lock); 944e705c121SKalle Valo 945e705c121SKalle Valo if (!txq->wd_timeout) 946e705c121SKalle Valo return; 947e705c121SKalle Valo 948e705c121SKalle Valo /* 949e705c121SKalle Valo * station is asleep and we send data - that must 950e705c121SKalle Valo * be uAPSD or PS-Poll. Don't rearm the timer. 951e705c121SKalle Valo */ 952e705c121SKalle Valo if (txq->frozen) 953e705c121SKalle Valo return; 954e705c121SKalle Valo 955e705c121SKalle Valo /* 956e705c121SKalle Valo * if empty delete timer, otherwise move timer forward 957e705c121SKalle Valo * since we're making progress on this queue 958e705c121SKalle Valo */ 959e705c121SKalle Valo if (txq->q.read_ptr == txq->q.write_ptr) 960e705c121SKalle Valo del_timer(&txq->stuck_timer); 961e705c121SKalle Valo else 962e705c121SKalle Valo mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 963e705c121SKalle Valo } 964e705c121SKalle Valo 965e705c121SKalle Valo /* Frees buffers until index _not_ inclusive */ 966e705c121SKalle Valo void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 967e705c121SKalle Valo struct sk_buff_head *skbs) 968e705c121SKalle Valo { 969e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 970e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 971e705c121SKalle Valo int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); 972e705c121SKalle Valo struct iwl_queue *q = &txq->q; 973e705c121SKalle Valo int last_to_free; 974e705c121SKalle Valo 975e705c121SKalle Valo /* This function is not meant to release cmd queue*/ 976e705c121SKalle Valo if (WARN_ON(txq_id == trans_pcie->cmd_queue)) 977e705c121SKalle Valo return; 978e705c121SKalle Valo 979e705c121SKalle Valo spin_lock_bh(&txq->lock); 980e705c121SKalle Valo 981e705c121SKalle Valo if (!txq->active) { 982e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 983e705c121SKalle Valo txq_id, ssn); 984e705c121SKalle Valo goto out; 985e705c121SKalle Valo } 986e705c121SKalle Valo 987e705c121SKalle Valo if (txq->q.read_ptr == tfd_num) 988e705c121SKalle Valo goto out; 989e705c121SKalle Valo 990e705c121SKalle Valo IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 991e705c121SKalle Valo txq_id, txq->q.read_ptr, tfd_num, ssn); 992e705c121SKalle Valo 993e705c121SKalle Valo /*Since we free until index _not_ inclusive, the one before index is 994e705c121SKalle Valo * the last we will free. This one must be used */ 995e705c121SKalle Valo last_to_free = iwl_queue_dec_wrap(tfd_num); 996e705c121SKalle Valo 997e705c121SKalle Valo if (!iwl_queue_used(q, last_to_free)) { 998e705c121SKalle Valo IWL_ERR(trans, 999e705c121SKalle Valo "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1000e705c121SKalle Valo __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, 1001e705c121SKalle Valo q->write_ptr, q->read_ptr); 1002e705c121SKalle Valo goto out; 1003e705c121SKalle Valo } 1004e705c121SKalle Valo 1005e705c121SKalle Valo if (WARN_ON(!skb_queue_empty(skbs))) 1006e705c121SKalle Valo goto out; 1007e705c121SKalle Valo 1008e705c121SKalle Valo for (; 1009e705c121SKalle Valo q->read_ptr != tfd_num; 1010e705c121SKalle Valo q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { 1011e705c121SKalle Valo 1012e705c121SKalle Valo if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) 1013e705c121SKalle Valo continue; 1014e705c121SKalle Valo 1015e705c121SKalle Valo __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); 1016e705c121SKalle Valo 1017e705c121SKalle Valo txq->entries[txq->q.read_ptr].skb = NULL; 1018e705c121SKalle Valo 1019e705c121SKalle Valo iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 1020e705c121SKalle Valo 1021e705c121SKalle Valo iwl_pcie_txq_free_tfd(trans, txq); 1022e705c121SKalle Valo } 1023e705c121SKalle Valo 1024e705c121SKalle Valo iwl_pcie_txq_progress(txq); 1025e705c121SKalle Valo 1026e705c121SKalle Valo if (iwl_queue_space(&txq->q) > txq->q.low_mark) 1027e705c121SKalle Valo iwl_wake_queue(trans, txq); 1028e705c121SKalle Valo 1029e705c121SKalle Valo if (q->read_ptr == q->write_ptr) { 1030e705c121SKalle Valo IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); 1031e705c121SKalle Valo iwl_trans_pcie_unref(trans); 1032e705c121SKalle Valo } 1033e705c121SKalle Valo 1034e705c121SKalle Valo out: 1035e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1036e705c121SKalle Valo } 1037e705c121SKalle Valo 1038e705c121SKalle Valo static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, 1039e705c121SKalle Valo const struct iwl_host_cmd *cmd) 1040e705c121SKalle Valo { 1041e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1042e705c121SKalle Valo int ret; 1043e705c121SKalle Valo 1044e705c121SKalle Valo lockdep_assert_held(&trans_pcie->reg_lock); 1045e705c121SKalle Valo 1046e705c121SKalle Valo if (!(cmd->flags & CMD_SEND_IN_IDLE) && 1047e705c121SKalle Valo !trans_pcie->ref_cmd_in_flight) { 1048e705c121SKalle Valo trans_pcie->ref_cmd_in_flight = true; 1049e705c121SKalle Valo IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); 1050e705c121SKalle Valo iwl_trans_pcie_ref(trans); 1051e705c121SKalle Valo } 1052e705c121SKalle Valo 1053e705c121SKalle Valo /* 1054e705c121SKalle Valo * wake up the NIC to make sure that the firmware will see the host 1055e705c121SKalle Valo * command - we will let the NIC sleep once all the host commands 1056e705c121SKalle Valo * returned. This needs to be done only on NICs that have 1057e705c121SKalle Valo * apmg_wake_up_wa set. 1058e705c121SKalle Valo */ 1059e705c121SKalle Valo if (trans->cfg->base_params->apmg_wake_up_wa && 1060e705c121SKalle Valo !trans_pcie->cmd_hold_nic_awake) { 1061e705c121SKalle Valo __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1062e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1063e705c121SKalle Valo 1064e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1065e705c121SKalle Valo CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1066e705c121SKalle Valo (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1067e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 1068e705c121SKalle Valo 15000); 1069e705c121SKalle Valo if (ret < 0) { 1070e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1071e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1072e705c121SKalle Valo IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1073e705c121SKalle Valo return -EIO; 1074e705c121SKalle Valo } 1075e705c121SKalle Valo trans_pcie->cmd_hold_nic_awake = true; 1076e705c121SKalle Valo } 1077e705c121SKalle Valo 1078e705c121SKalle Valo return 0; 1079e705c121SKalle Valo } 1080e705c121SKalle Valo 1081e705c121SKalle Valo static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 1082e705c121SKalle Valo { 1083e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1084e705c121SKalle Valo 1085e705c121SKalle Valo lockdep_assert_held(&trans_pcie->reg_lock); 1086e705c121SKalle Valo 1087e705c121SKalle Valo if (trans_pcie->ref_cmd_in_flight) { 1088e705c121SKalle Valo trans_pcie->ref_cmd_in_flight = false; 1089e705c121SKalle Valo IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); 1090e705c121SKalle Valo iwl_trans_pcie_unref(trans); 1091e705c121SKalle Valo } 1092e705c121SKalle Valo 1093e705c121SKalle Valo if (trans->cfg->base_params->apmg_wake_up_wa) { 1094e705c121SKalle Valo if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 1095e705c121SKalle Valo return 0; 1096e705c121SKalle Valo 1097e705c121SKalle Valo trans_pcie->cmd_hold_nic_awake = false; 1098e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1099e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1100e705c121SKalle Valo } 1101e705c121SKalle Valo return 0; 1102e705c121SKalle Valo } 1103e705c121SKalle Valo 1104e705c121SKalle Valo /* 1105e705c121SKalle Valo * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1106e705c121SKalle Valo * 1107e705c121SKalle Valo * When FW advances 'R' index, all entries between old and new 'R' index 1108e705c121SKalle Valo * need to be reclaimed. As result, some free space forms. If there is 1109e705c121SKalle Valo * enough free space (> low mark), wake the stack that feeds us. 1110e705c121SKalle Valo */ 1111e705c121SKalle Valo static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1112e705c121SKalle Valo { 1113e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1114e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1115e705c121SKalle Valo struct iwl_queue *q = &txq->q; 1116e705c121SKalle Valo unsigned long flags; 1117e705c121SKalle Valo int nfreed = 0; 1118e705c121SKalle Valo 1119e705c121SKalle Valo lockdep_assert_held(&txq->lock); 1120e705c121SKalle Valo 1121e705c121SKalle Valo if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) { 1122e705c121SKalle Valo IWL_ERR(trans, 1123e705c121SKalle Valo "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1124e705c121SKalle Valo __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 1125e705c121SKalle Valo q->write_ptr, q->read_ptr); 1126e705c121SKalle Valo return; 1127e705c121SKalle Valo } 1128e705c121SKalle Valo 1129e705c121SKalle Valo for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx; 1130e705c121SKalle Valo q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { 1131e705c121SKalle Valo 1132e705c121SKalle Valo if (nfreed++ > 0) { 1133e705c121SKalle Valo IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1134e705c121SKalle Valo idx, q->write_ptr, q->read_ptr); 1135e705c121SKalle Valo iwl_force_nmi(trans); 1136e705c121SKalle Valo } 1137e705c121SKalle Valo } 1138e705c121SKalle Valo 1139e705c121SKalle Valo if (q->read_ptr == q->write_ptr) { 1140e705c121SKalle Valo spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1141e705c121SKalle Valo iwl_pcie_clear_cmd_in_flight(trans); 1142e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1143e705c121SKalle Valo } 1144e705c121SKalle Valo 1145e705c121SKalle Valo iwl_pcie_txq_progress(txq); 1146e705c121SKalle Valo } 1147e705c121SKalle Valo 1148e705c121SKalle Valo static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1149e705c121SKalle Valo u16 txq_id) 1150e705c121SKalle Valo { 1151e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1152e705c121SKalle Valo u32 tbl_dw_addr; 1153e705c121SKalle Valo u32 tbl_dw; 1154e705c121SKalle Valo u16 scd_q2ratid; 1155e705c121SKalle Valo 1156e705c121SKalle Valo scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1157e705c121SKalle Valo 1158e705c121SKalle Valo tbl_dw_addr = trans_pcie->scd_base_addr + 1159e705c121SKalle Valo SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1160e705c121SKalle Valo 1161e705c121SKalle Valo tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1162e705c121SKalle Valo 1163e705c121SKalle Valo if (txq_id & 0x1) 1164e705c121SKalle Valo tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1165e705c121SKalle Valo else 1166e705c121SKalle Valo tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1167e705c121SKalle Valo 1168e705c121SKalle Valo iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1169e705c121SKalle Valo 1170e705c121SKalle Valo return 0; 1171e705c121SKalle Valo } 1172e705c121SKalle Valo 1173e705c121SKalle Valo /* Receiver address (actually, Rx station's index into station table), 1174e705c121SKalle Valo * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1175e705c121SKalle Valo #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1176e705c121SKalle Valo 1177e705c121SKalle Valo void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1178e705c121SKalle Valo const struct iwl_trans_txq_scd_cfg *cfg, 1179e705c121SKalle Valo unsigned int wdg_timeout) 1180e705c121SKalle Valo { 1181e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1182e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1183e705c121SKalle Valo int fifo = -1; 1184e705c121SKalle Valo 1185e705c121SKalle Valo if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 1186e705c121SKalle Valo WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1187e705c121SKalle Valo 1188e705c121SKalle Valo txq->wd_timeout = msecs_to_jiffies(wdg_timeout); 1189e705c121SKalle Valo 1190e705c121SKalle Valo if (cfg) { 1191e705c121SKalle Valo fifo = cfg->fifo; 1192e705c121SKalle Valo 1193e705c121SKalle Valo /* Disable the scheduler prior configuring the cmd queue */ 1194e705c121SKalle Valo if (txq_id == trans_pcie->cmd_queue && 1195e705c121SKalle Valo trans_pcie->scd_set_active) 1196e705c121SKalle Valo iwl_scd_enable_set_active(trans, 0); 1197e705c121SKalle Valo 1198e705c121SKalle Valo /* Stop this Tx queue before configuring it */ 1199e705c121SKalle Valo iwl_scd_txq_set_inactive(trans, txq_id); 1200e705c121SKalle Valo 1201e705c121SKalle Valo /* Set this queue as a chain-building queue unless it is CMD */ 1202e705c121SKalle Valo if (txq_id != trans_pcie->cmd_queue) 1203e705c121SKalle Valo iwl_scd_txq_set_chain(trans, txq_id); 1204e705c121SKalle Valo 1205e705c121SKalle Valo if (cfg->aggregate) { 1206e705c121SKalle Valo u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); 1207e705c121SKalle Valo 1208e705c121SKalle Valo /* Map receiver-address / traffic-ID to this queue */ 1209e705c121SKalle Valo iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); 1210e705c121SKalle Valo 1211e705c121SKalle Valo /* enable aggregations for the queue */ 1212e705c121SKalle Valo iwl_scd_txq_enable_agg(trans, txq_id); 1213e705c121SKalle Valo txq->ampdu = true; 1214e705c121SKalle Valo } else { 1215e705c121SKalle Valo /* 1216e705c121SKalle Valo * disable aggregations for the queue, this will also 1217e705c121SKalle Valo * make the ra_tid mapping configuration irrelevant 1218e705c121SKalle Valo * since it is now a non-AGG queue. 1219e705c121SKalle Valo */ 1220e705c121SKalle Valo iwl_scd_txq_disable_agg(trans, txq_id); 1221e705c121SKalle Valo 1222e705c121SKalle Valo ssn = txq->q.read_ptr; 1223e705c121SKalle Valo } 1224e705c121SKalle Valo } 1225e705c121SKalle Valo 1226e705c121SKalle Valo /* Place first TFD at index corresponding to start sequence number. 1227e705c121SKalle Valo * Assumes that ssn_idx is valid (!= 0xFFF) */ 1228e705c121SKalle Valo txq->q.read_ptr = (ssn & 0xff); 1229e705c121SKalle Valo txq->q.write_ptr = (ssn & 0xff); 1230e705c121SKalle Valo iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1231e705c121SKalle Valo (ssn & 0xff) | (txq_id << 8)); 1232e705c121SKalle Valo 1233e705c121SKalle Valo if (cfg) { 1234e705c121SKalle Valo u8 frame_limit = cfg->frame_limit; 1235e705c121SKalle Valo 1236e705c121SKalle Valo iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1237e705c121SKalle Valo 1238e705c121SKalle Valo /* Set up Tx window size and frame limit for this queue */ 1239e705c121SKalle Valo iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1240e705c121SKalle Valo SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1241e705c121SKalle Valo iwl_trans_write_mem32(trans, 1242e705c121SKalle Valo trans_pcie->scd_base_addr + 1243e705c121SKalle Valo SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1244e705c121SKalle Valo ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 1245e705c121SKalle Valo SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 1246e705c121SKalle Valo ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 1247e705c121SKalle Valo SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 1248e705c121SKalle Valo 1249e705c121SKalle Valo /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1250e705c121SKalle Valo iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1251e705c121SKalle Valo (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1252e705c121SKalle Valo (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1253e705c121SKalle Valo (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1254e705c121SKalle Valo SCD_QUEUE_STTS_REG_MSK); 1255e705c121SKalle Valo 1256e705c121SKalle Valo /* enable the scheduler for this queue (only) */ 1257e705c121SKalle Valo if (txq_id == trans_pcie->cmd_queue && 1258e705c121SKalle Valo trans_pcie->scd_set_active) 1259e705c121SKalle Valo iwl_scd_enable_set_active(trans, BIT(txq_id)); 1260e705c121SKalle Valo 1261e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, 1262e705c121SKalle Valo "Activate queue %d on FIFO %d WrPtr: %d\n", 1263e705c121SKalle Valo txq_id, fifo, ssn & 0xff); 1264e705c121SKalle Valo } else { 1265e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, 1266e705c121SKalle Valo "Activate queue %d WrPtr: %d\n", 1267e705c121SKalle Valo txq_id, ssn & 0xff); 1268e705c121SKalle Valo } 1269e705c121SKalle Valo 1270e705c121SKalle Valo txq->active = true; 1271e705c121SKalle Valo } 1272e705c121SKalle Valo 1273e705c121SKalle Valo void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1274e705c121SKalle Valo bool configure_scd) 1275e705c121SKalle Valo { 1276e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1277e705c121SKalle Valo u32 stts_addr = trans_pcie->scd_base_addr + 1278e705c121SKalle Valo SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1279e705c121SKalle Valo static const u32 zero_val[4] = {}; 1280e705c121SKalle Valo 1281e705c121SKalle Valo trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; 1282e705c121SKalle Valo trans_pcie->txq[txq_id].frozen = false; 1283e705c121SKalle Valo 1284e705c121SKalle Valo /* 1285e705c121SKalle Valo * Upon HW Rfkill - we stop the device, and then stop the queues 1286e705c121SKalle Valo * in the op_mode. Just for the sake of the simplicity of the op_mode, 1287e705c121SKalle Valo * allow the op_mode to call txq_disable after it already called 1288e705c121SKalle Valo * stop_device. 1289e705c121SKalle Valo */ 1290e705c121SKalle Valo if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 1291e705c121SKalle Valo WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 1292e705c121SKalle Valo "queue %d not used", txq_id); 1293e705c121SKalle Valo return; 1294e705c121SKalle Valo } 1295e705c121SKalle Valo 1296e705c121SKalle Valo if (configure_scd) { 1297e705c121SKalle Valo iwl_scd_txq_set_inactive(trans, txq_id); 1298e705c121SKalle Valo 1299e705c121SKalle Valo iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, 1300e705c121SKalle Valo ARRAY_SIZE(zero_val)); 1301e705c121SKalle Valo } 1302e705c121SKalle Valo 1303e705c121SKalle Valo iwl_pcie_txq_unmap(trans, txq_id); 1304e705c121SKalle Valo trans_pcie->txq[txq_id].ampdu = false; 1305e705c121SKalle Valo 1306e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1307e705c121SKalle Valo } 1308e705c121SKalle Valo 1309e705c121SKalle Valo /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1310e705c121SKalle Valo 1311e705c121SKalle Valo /* 1312e705c121SKalle Valo * iwl_pcie_enqueue_hcmd - enqueue a uCode command 1313e705c121SKalle Valo * @priv: device private data point 1314e705c121SKalle Valo * @cmd: a pointer to the ucode command structure 1315e705c121SKalle Valo * 1316e705c121SKalle Valo * The function returns < 0 values to indicate the operation 1317e705c121SKalle Valo * failed. On success, it returns the index (>= 0) of command in the 1318e705c121SKalle Valo * command queue. 1319e705c121SKalle Valo */ 1320e705c121SKalle Valo static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1321e705c121SKalle Valo struct iwl_host_cmd *cmd) 1322e705c121SKalle Valo { 1323e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1324e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1325e705c121SKalle Valo struct iwl_queue *q = &txq->q; 1326e705c121SKalle Valo struct iwl_device_cmd *out_cmd; 1327e705c121SKalle Valo struct iwl_cmd_meta *out_meta; 1328e705c121SKalle Valo unsigned long flags; 1329e705c121SKalle Valo void *dup_buf = NULL; 1330e705c121SKalle Valo dma_addr_t phys_addr; 1331e705c121SKalle Valo int idx; 1332e705c121SKalle Valo u16 copy_size, cmd_size, scratch_size; 1333e705c121SKalle Valo bool had_nocopy = false; 1334e705c121SKalle Valo u8 group_id = iwl_cmd_groupid(cmd->id); 1335e705c121SKalle Valo int i, ret; 1336e705c121SKalle Valo u32 cmd_pos; 1337e705c121SKalle Valo const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1338e705c121SKalle Valo u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1339e705c121SKalle Valo 1340e705c121SKalle Valo if (WARN(!trans_pcie->wide_cmd_header && 1341e705c121SKalle Valo group_id > IWL_ALWAYS_LONG_GROUP, 1342e705c121SKalle Valo "unsupported wide command %#x\n", cmd->id)) 1343e705c121SKalle Valo return -EINVAL; 1344e705c121SKalle Valo 1345e705c121SKalle Valo if (group_id != 0) { 1346e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header_wide); 1347e705c121SKalle Valo cmd_size = sizeof(struct iwl_cmd_header_wide); 1348e705c121SKalle Valo } else { 1349e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header); 1350e705c121SKalle Valo cmd_size = sizeof(struct iwl_cmd_header); 1351e705c121SKalle Valo } 1352e705c121SKalle Valo 1353e705c121SKalle Valo /* need one for the header if the first is NOCOPY */ 1354e705c121SKalle Valo BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1355e705c121SKalle Valo 1356e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1357e705c121SKalle Valo cmddata[i] = cmd->data[i]; 1358e705c121SKalle Valo cmdlen[i] = cmd->len[i]; 1359e705c121SKalle Valo 1360e705c121SKalle Valo if (!cmd->len[i]) 1361e705c121SKalle Valo continue; 1362e705c121SKalle Valo 1363e705c121SKalle Valo /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ 1364e705c121SKalle Valo if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { 1365e705c121SKalle Valo int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; 1366e705c121SKalle Valo 1367e705c121SKalle Valo if (copy > cmdlen[i]) 1368e705c121SKalle Valo copy = cmdlen[i]; 1369e705c121SKalle Valo cmdlen[i] -= copy; 1370e705c121SKalle Valo cmddata[i] += copy; 1371e705c121SKalle Valo copy_size += copy; 1372e705c121SKalle Valo } 1373e705c121SKalle Valo 1374e705c121SKalle Valo if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1375e705c121SKalle Valo had_nocopy = true; 1376e705c121SKalle Valo if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1377e705c121SKalle Valo idx = -EINVAL; 1378e705c121SKalle Valo goto free_dup_buf; 1379e705c121SKalle Valo } 1380e705c121SKalle Valo } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1381e705c121SKalle Valo /* 1382e705c121SKalle Valo * This is also a chunk that isn't copied 1383e705c121SKalle Valo * to the static buffer so set had_nocopy. 1384e705c121SKalle Valo */ 1385e705c121SKalle Valo had_nocopy = true; 1386e705c121SKalle Valo 1387e705c121SKalle Valo /* only allowed once */ 1388e705c121SKalle Valo if (WARN_ON(dup_buf)) { 1389e705c121SKalle Valo idx = -EINVAL; 1390e705c121SKalle Valo goto free_dup_buf; 1391e705c121SKalle Valo } 1392e705c121SKalle Valo 1393e705c121SKalle Valo dup_buf = kmemdup(cmddata[i], cmdlen[i], 1394e705c121SKalle Valo GFP_ATOMIC); 1395e705c121SKalle Valo if (!dup_buf) 1396e705c121SKalle Valo return -ENOMEM; 1397e705c121SKalle Valo } else { 1398e705c121SKalle Valo /* NOCOPY must not be followed by normal! */ 1399e705c121SKalle Valo if (WARN_ON(had_nocopy)) { 1400e705c121SKalle Valo idx = -EINVAL; 1401e705c121SKalle Valo goto free_dup_buf; 1402e705c121SKalle Valo } 1403e705c121SKalle Valo copy_size += cmdlen[i]; 1404e705c121SKalle Valo } 1405e705c121SKalle Valo cmd_size += cmd->len[i]; 1406e705c121SKalle Valo } 1407e705c121SKalle Valo 1408e705c121SKalle Valo /* 1409e705c121SKalle Valo * If any of the command structures end up being larger than 1410e705c121SKalle Valo * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1411e705c121SKalle Valo * allocated into separate TFDs, then we will need to 1412e705c121SKalle Valo * increase the size of the buffers. 1413e705c121SKalle Valo */ 1414e705c121SKalle Valo if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1415e705c121SKalle Valo "Command %s (%#x) is too large (%d bytes)\n", 141639bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 141739bdb17eSSharon Dvir cmd->id, copy_size)) { 1418e705c121SKalle Valo idx = -EINVAL; 1419e705c121SKalle Valo goto free_dup_buf; 1420e705c121SKalle Valo } 1421e705c121SKalle Valo 1422e705c121SKalle Valo spin_lock_bh(&txq->lock); 1423e705c121SKalle Valo 1424e705c121SKalle Valo if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1425e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1426e705c121SKalle Valo 1427e705c121SKalle Valo IWL_ERR(trans, "No space in command queue\n"); 1428e705c121SKalle Valo iwl_op_mode_cmd_queue_full(trans->op_mode); 1429e705c121SKalle Valo idx = -ENOSPC; 1430e705c121SKalle Valo goto free_dup_buf; 1431e705c121SKalle Valo } 1432e705c121SKalle Valo 1433e705c121SKalle Valo idx = get_cmd_index(q, q->write_ptr); 1434e705c121SKalle Valo out_cmd = txq->entries[idx].cmd; 1435e705c121SKalle Valo out_meta = &txq->entries[idx].meta; 1436e705c121SKalle Valo 1437e705c121SKalle Valo memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1438e705c121SKalle Valo if (cmd->flags & CMD_WANT_SKB) 1439e705c121SKalle Valo out_meta->source = cmd; 1440e705c121SKalle Valo 1441e705c121SKalle Valo /* set up the header */ 1442e705c121SKalle Valo if (group_id != 0) { 1443e705c121SKalle Valo out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1444e705c121SKalle Valo out_cmd->hdr_wide.group_id = group_id; 1445e705c121SKalle Valo out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1446e705c121SKalle Valo out_cmd->hdr_wide.length = 1447e705c121SKalle Valo cpu_to_le16(cmd_size - 1448e705c121SKalle Valo sizeof(struct iwl_cmd_header_wide)); 1449e705c121SKalle Valo out_cmd->hdr_wide.reserved = 0; 1450e705c121SKalle Valo out_cmd->hdr_wide.sequence = 1451e705c121SKalle Valo cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1452e705c121SKalle Valo INDEX_TO_SEQ(q->write_ptr)); 1453e705c121SKalle Valo 1454e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header_wide); 1455e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header_wide); 1456e705c121SKalle Valo } else { 1457e705c121SKalle Valo out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1458e705c121SKalle Valo out_cmd->hdr.sequence = 1459e705c121SKalle Valo cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1460e705c121SKalle Valo INDEX_TO_SEQ(q->write_ptr)); 1461e705c121SKalle Valo out_cmd->hdr.group_id = 0; 1462e705c121SKalle Valo 1463e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header); 1464e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header); 1465e705c121SKalle Valo } 1466e705c121SKalle Valo 1467e705c121SKalle Valo /* and copy the data that needs to be copied */ 1468e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1469e705c121SKalle Valo int copy; 1470e705c121SKalle Valo 1471e705c121SKalle Valo if (!cmd->len[i]) 1472e705c121SKalle Valo continue; 1473e705c121SKalle Valo 1474e705c121SKalle Valo /* copy everything if not nocopy/dup */ 1475e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1476e705c121SKalle Valo IWL_HCMD_DFL_DUP))) { 1477e705c121SKalle Valo copy = cmd->len[i]; 1478e705c121SKalle Valo 1479e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1480e705c121SKalle Valo cmd_pos += copy; 1481e705c121SKalle Valo copy_size += copy; 1482e705c121SKalle Valo continue; 1483e705c121SKalle Valo } 1484e705c121SKalle Valo 1485e705c121SKalle Valo /* 1486e705c121SKalle Valo * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied 1487e705c121SKalle Valo * in total (for the scratchbuf handling), but copy up to what 1488e705c121SKalle Valo * we can fit into the payload for debug dump purposes. 1489e705c121SKalle Valo */ 1490e705c121SKalle Valo copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1491e705c121SKalle Valo 1492e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1493e705c121SKalle Valo cmd_pos += copy; 1494e705c121SKalle Valo 1495e705c121SKalle Valo /* However, treat copy_size the proper way, we need it below */ 1496e705c121SKalle Valo if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { 1497e705c121SKalle Valo copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; 1498e705c121SKalle Valo 1499e705c121SKalle Valo if (copy > cmd->len[i]) 1500e705c121SKalle Valo copy = cmd->len[i]; 1501e705c121SKalle Valo copy_size += copy; 1502e705c121SKalle Valo } 1503e705c121SKalle Valo } 1504e705c121SKalle Valo 1505e705c121SKalle Valo IWL_DEBUG_HC(trans, 1506e705c121SKalle Valo "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 150739bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 1508e705c121SKalle Valo group_id, out_cmd->hdr.cmd, 1509e705c121SKalle Valo le16_to_cpu(out_cmd->hdr.sequence), 1510e705c121SKalle Valo cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1511e705c121SKalle Valo 1512e705c121SKalle Valo /* start the TFD with the scratchbuf */ 1513e705c121SKalle Valo scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); 1514e705c121SKalle Valo memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); 1515e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, 1516e705c121SKalle Valo iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), 1517e705c121SKalle Valo scratch_size, true); 1518e705c121SKalle Valo 1519e705c121SKalle Valo /* map first command fragment, if any remains */ 1520e705c121SKalle Valo if (copy_size > scratch_size) { 1521e705c121SKalle Valo phys_addr = dma_map_single(trans->dev, 1522e705c121SKalle Valo ((u8 *)&out_cmd->hdr) + scratch_size, 1523e705c121SKalle Valo copy_size - scratch_size, 1524e705c121SKalle Valo DMA_TO_DEVICE); 1525e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) { 1526e705c121SKalle Valo iwl_pcie_tfd_unmap(trans, out_meta, 1527e705c121SKalle Valo &txq->tfds[q->write_ptr]); 1528e705c121SKalle Valo idx = -ENOMEM; 1529e705c121SKalle Valo goto out; 1530e705c121SKalle Valo } 1531e705c121SKalle Valo 1532e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1533e705c121SKalle Valo copy_size - scratch_size, false); 1534e705c121SKalle Valo } 1535e705c121SKalle Valo 1536e705c121SKalle Valo /* map the remaining (adjusted) nocopy/dup fragments */ 1537e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1538e705c121SKalle Valo const void *data = cmddata[i]; 1539e705c121SKalle Valo 1540e705c121SKalle Valo if (!cmdlen[i]) 1541e705c121SKalle Valo continue; 1542e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1543e705c121SKalle Valo IWL_HCMD_DFL_DUP))) 1544e705c121SKalle Valo continue; 1545e705c121SKalle Valo if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1546e705c121SKalle Valo data = dup_buf; 1547e705c121SKalle Valo phys_addr = dma_map_single(trans->dev, (void *)data, 1548e705c121SKalle Valo cmdlen[i], DMA_TO_DEVICE); 1549e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) { 1550e705c121SKalle Valo iwl_pcie_tfd_unmap(trans, out_meta, 1551e705c121SKalle Valo &txq->tfds[q->write_ptr]); 1552e705c121SKalle Valo idx = -ENOMEM; 1553e705c121SKalle Valo goto out; 1554e705c121SKalle Valo } 1555e705c121SKalle Valo 1556e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1557e705c121SKalle Valo } 1558e705c121SKalle Valo 1559e705c121SKalle Valo BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS > 1560e705c121SKalle Valo sizeof(out_meta->flags) * BITS_PER_BYTE); 1561e705c121SKalle Valo out_meta->flags = cmd->flags; 1562e705c121SKalle Valo if (WARN_ON_ONCE(txq->entries[idx].free_buf)) 1563e705c121SKalle Valo kzfree(txq->entries[idx].free_buf); 1564e705c121SKalle Valo txq->entries[idx].free_buf = dup_buf; 1565e705c121SKalle Valo 1566e705c121SKalle Valo trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); 1567e705c121SKalle Valo 1568e705c121SKalle Valo /* start timer if queue currently empty */ 1569e705c121SKalle Valo if (q->read_ptr == q->write_ptr && txq->wd_timeout) 1570e705c121SKalle Valo mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1571e705c121SKalle Valo 1572e705c121SKalle Valo spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1573e705c121SKalle Valo ret = iwl_pcie_set_cmd_in_flight(trans, cmd); 1574e705c121SKalle Valo if (ret < 0) { 1575e705c121SKalle Valo idx = ret; 1576e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1577e705c121SKalle Valo goto out; 1578e705c121SKalle Valo } 1579e705c121SKalle Valo 1580e705c121SKalle Valo /* Increment and update queue's write index */ 1581e705c121SKalle Valo q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); 1582e705c121SKalle Valo iwl_pcie_txq_inc_wr_ptr(trans, txq); 1583e705c121SKalle Valo 1584e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1585e705c121SKalle Valo 1586e705c121SKalle Valo out: 1587e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1588e705c121SKalle Valo free_dup_buf: 1589e705c121SKalle Valo if (idx < 0) 1590e705c121SKalle Valo kfree(dup_buf); 1591e705c121SKalle Valo return idx; 1592e705c121SKalle Valo } 1593e705c121SKalle Valo 1594e705c121SKalle Valo /* 1595e705c121SKalle Valo * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1596e705c121SKalle Valo * @rxb: Rx buffer to reclaim 1597e705c121SKalle Valo */ 1598e705c121SKalle Valo void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 1599e705c121SKalle Valo struct iwl_rx_cmd_buffer *rxb) 1600e705c121SKalle Valo { 1601e705c121SKalle Valo struct iwl_rx_packet *pkt = rxb_addr(rxb); 1602e705c121SKalle Valo u16 sequence = le16_to_cpu(pkt->hdr.sequence); 160339bdb17eSSharon Dvir u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id); 160439bdb17eSSharon Dvir u32 cmd_id; 1605e705c121SKalle Valo int txq_id = SEQ_TO_QUEUE(sequence); 1606e705c121SKalle Valo int index = SEQ_TO_INDEX(sequence); 1607e705c121SKalle Valo int cmd_index; 1608e705c121SKalle Valo struct iwl_device_cmd *cmd; 1609e705c121SKalle Valo struct iwl_cmd_meta *meta; 1610e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1611e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1612e705c121SKalle Valo 1613e705c121SKalle Valo /* If a Tx command is being handled and it isn't in the actual 1614e705c121SKalle Valo * command queue then there a command routing bug has been introduced 1615e705c121SKalle Valo * in the queue management code. */ 1616e705c121SKalle Valo if (WARN(txq_id != trans_pcie->cmd_queue, 1617e705c121SKalle Valo "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 1618e705c121SKalle Valo txq_id, trans_pcie->cmd_queue, sequence, 1619e705c121SKalle Valo trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, 1620e705c121SKalle Valo trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { 1621e705c121SKalle Valo iwl_print_hex_error(trans, pkt, 32); 1622e705c121SKalle Valo return; 1623e705c121SKalle Valo } 1624e705c121SKalle Valo 1625e705c121SKalle Valo spin_lock_bh(&txq->lock); 1626e705c121SKalle Valo 1627e705c121SKalle Valo cmd_index = get_cmd_index(&txq->q, index); 1628e705c121SKalle Valo cmd = txq->entries[cmd_index].cmd; 1629e705c121SKalle Valo meta = &txq->entries[cmd_index].meta; 163039bdb17eSSharon Dvir cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); 1631e705c121SKalle Valo 1632e705c121SKalle Valo iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]); 1633e705c121SKalle Valo 1634e705c121SKalle Valo /* Input error checking is done when commands are added to queue. */ 1635e705c121SKalle Valo if (meta->flags & CMD_WANT_SKB) { 1636e705c121SKalle Valo struct page *p = rxb_steal_page(rxb); 1637e705c121SKalle Valo 1638e705c121SKalle Valo meta->source->resp_pkt = pkt; 1639e705c121SKalle Valo meta->source->_rx_page_addr = (unsigned long)page_address(p); 1640e705c121SKalle Valo meta->source->_rx_page_order = trans_pcie->rx_page_order; 1641e705c121SKalle Valo } 1642e705c121SKalle Valo 1643dcbb4746SEmmanuel Grumbach if (meta->flags & CMD_WANT_ASYNC_CALLBACK) 1644dcbb4746SEmmanuel Grumbach iwl_op_mode_async_cb(trans->op_mode, cmd); 1645dcbb4746SEmmanuel Grumbach 1646e705c121SKalle Valo iwl_pcie_cmdq_reclaim(trans, txq_id, index); 1647e705c121SKalle Valo 1648e705c121SKalle Valo if (!(meta->flags & CMD_ASYNC)) { 1649e705c121SKalle Valo if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { 1650e705c121SKalle Valo IWL_WARN(trans, 1651e705c121SKalle Valo "HCMD_ACTIVE already clear for command %s\n", 165239bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id)); 1653e705c121SKalle Valo } 1654e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1655e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 165639bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id)); 1657e705c121SKalle Valo wake_up(&trans_pcie->wait_command_queue); 1658e705c121SKalle Valo } 1659e705c121SKalle Valo 1660e705c121SKalle Valo meta->flags = 0; 1661e705c121SKalle Valo 1662e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1663e705c121SKalle Valo } 1664e705c121SKalle Valo 1665e705c121SKalle Valo #define HOST_COMPLETE_TIMEOUT (2 * HZ) 1666e705c121SKalle Valo 1667e705c121SKalle Valo static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, 1668e705c121SKalle Valo struct iwl_host_cmd *cmd) 1669e705c121SKalle Valo { 1670e705c121SKalle Valo int ret; 1671e705c121SKalle Valo 1672e705c121SKalle Valo /* An asynchronous command can not expect an SKB to be set. */ 1673e705c121SKalle Valo if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1674e705c121SKalle Valo return -EINVAL; 1675e705c121SKalle Valo 1676e705c121SKalle Valo ret = iwl_pcie_enqueue_hcmd(trans, cmd); 1677e705c121SKalle Valo if (ret < 0) { 1678e705c121SKalle Valo IWL_ERR(trans, 1679e705c121SKalle Valo "Error sending %s: enqueue_hcmd failed: %d\n", 168039bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), ret); 1681e705c121SKalle Valo return ret; 1682e705c121SKalle Valo } 1683e705c121SKalle Valo return 0; 1684e705c121SKalle Valo } 1685e705c121SKalle Valo 1686e705c121SKalle Valo static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, 1687e705c121SKalle Valo struct iwl_host_cmd *cmd) 1688e705c121SKalle Valo { 1689e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1690e705c121SKalle Valo int cmd_idx; 1691e705c121SKalle Valo int ret; 1692e705c121SKalle Valo 1693e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 169439bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1695e705c121SKalle Valo 1696e705c121SKalle Valo if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1697e705c121SKalle Valo &trans->status), 1698e705c121SKalle Valo "Command %s: a command is already active!\n", 169939bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id))) 1700e705c121SKalle Valo return -EIO; 1701e705c121SKalle Valo 1702e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 170339bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1704e705c121SKalle Valo 1705e705c121SKalle Valo cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 1706e705c121SKalle Valo if (cmd_idx < 0) { 1707e705c121SKalle Valo ret = cmd_idx; 1708e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1709e705c121SKalle Valo IWL_ERR(trans, 1710e705c121SKalle Valo "Error sending %s: enqueue_hcmd failed: %d\n", 171139bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), ret); 1712e705c121SKalle Valo return ret; 1713e705c121SKalle Valo } 1714e705c121SKalle Valo 1715e705c121SKalle Valo ret = wait_event_timeout(trans_pcie->wait_command_queue, 1716e705c121SKalle Valo !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1717e705c121SKalle Valo &trans->status), 1718e705c121SKalle Valo HOST_COMPLETE_TIMEOUT); 1719e705c121SKalle Valo if (!ret) { 1720e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1721e705c121SKalle Valo struct iwl_queue *q = &txq->q; 1722e705c121SKalle Valo 1723e705c121SKalle Valo IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 172439bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id), 1725e705c121SKalle Valo jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1726e705c121SKalle Valo 1727e705c121SKalle Valo IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 1728e705c121SKalle Valo q->read_ptr, q->write_ptr); 1729e705c121SKalle Valo 1730e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1731e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 173239bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1733e705c121SKalle Valo ret = -ETIMEDOUT; 1734e705c121SKalle Valo 1735e705c121SKalle Valo iwl_force_nmi(trans); 1736e705c121SKalle Valo iwl_trans_fw_error(trans); 1737e705c121SKalle Valo 1738e705c121SKalle Valo goto cancel; 1739e705c121SKalle Valo } 1740e705c121SKalle Valo 1741e705c121SKalle Valo if (test_bit(STATUS_FW_ERROR, &trans->status)) { 1742e705c121SKalle Valo IWL_ERR(trans, "FW error in SYNC CMD %s\n", 174339bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1744e705c121SKalle Valo dump_stack(); 1745e705c121SKalle Valo ret = -EIO; 1746e705c121SKalle Valo goto cancel; 1747e705c121SKalle Valo } 1748e705c121SKalle Valo 1749e705c121SKalle Valo if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1750e705c121SKalle Valo test_bit(STATUS_RFKILL, &trans->status)) { 1751e705c121SKalle Valo IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1752e705c121SKalle Valo ret = -ERFKILL; 1753e705c121SKalle Valo goto cancel; 1754e705c121SKalle Valo } 1755e705c121SKalle Valo 1756e705c121SKalle Valo if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1757e705c121SKalle Valo IWL_ERR(trans, "Error: Response NULL in '%s'\n", 175839bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id)); 1759e705c121SKalle Valo ret = -EIO; 1760e705c121SKalle Valo goto cancel; 1761e705c121SKalle Valo } 1762e705c121SKalle Valo 1763e705c121SKalle Valo return 0; 1764e705c121SKalle Valo 1765e705c121SKalle Valo cancel: 1766e705c121SKalle Valo if (cmd->flags & CMD_WANT_SKB) { 1767e705c121SKalle Valo /* 1768e705c121SKalle Valo * Cancel the CMD_WANT_SKB flag for the cmd in the 1769e705c121SKalle Valo * TX cmd queue. Otherwise in case the cmd comes 1770e705c121SKalle Valo * in later, it will possibly set an invalid 1771e705c121SKalle Valo * address (cmd->meta.source). 1772e705c121SKalle Valo */ 1773e705c121SKalle Valo trans_pcie->txq[trans_pcie->cmd_queue]. 1774e705c121SKalle Valo entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 1775e705c121SKalle Valo } 1776e705c121SKalle Valo 1777e705c121SKalle Valo if (cmd->resp_pkt) { 1778e705c121SKalle Valo iwl_free_resp(cmd); 1779e705c121SKalle Valo cmd->resp_pkt = NULL; 1780e705c121SKalle Valo } 1781e705c121SKalle Valo 1782e705c121SKalle Valo return ret; 1783e705c121SKalle Valo } 1784e705c121SKalle Valo 1785e705c121SKalle Valo int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1786e705c121SKalle Valo { 1787e705c121SKalle Valo if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1788e705c121SKalle Valo test_bit(STATUS_RFKILL, &trans->status)) { 1789e705c121SKalle Valo IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 1790e705c121SKalle Valo cmd->id); 1791e705c121SKalle Valo return -ERFKILL; 1792e705c121SKalle Valo } 1793e705c121SKalle Valo 1794e705c121SKalle Valo if (cmd->flags & CMD_ASYNC) 1795e705c121SKalle Valo return iwl_pcie_send_hcmd_async(trans, cmd); 1796e705c121SKalle Valo 1797e705c121SKalle Valo /* We still can fail on RFKILL that can be asserted while we wait */ 1798e705c121SKalle Valo return iwl_pcie_send_hcmd_sync(trans, cmd); 1799e705c121SKalle Valo } 1800e705c121SKalle Valo 1801e705c121SKalle Valo int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1802e705c121SKalle Valo struct iwl_device_cmd *dev_cmd, int txq_id) 1803e705c121SKalle Valo { 1804e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1805e705c121SKalle Valo struct ieee80211_hdr *hdr; 1806e705c121SKalle Valo struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 1807e705c121SKalle Valo struct iwl_cmd_meta *out_meta; 1808e705c121SKalle Valo struct iwl_txq *txq; 1809e705c121SKalle Valo struct iwl_queue *q; 1810e705c121SKalle Valo dma_addr_t tb0_phys, tb1_phys, scratch_phys; 1811e705c121SKalle Valo void *tb1_addr; 1812e705c121SKalle Valo u16 len, tb1_len, tb2_len; 1813e705c121SKalle Valo bool wait_write_ptr; 1814e705c121SKalle Valo __le16 fc; 1815e705c121SKalle Valo u8 hdr_len; 1816e705c121SKalle Valo u16 wifi_seq; 1817e705c121SKalle Valo int i; 1818e705c121SKalle Valo 1819e705c121SKalle Valo txq = &trans_pcie->txq[txq_id]; 1820e705c121SKalle Valo q = &txq->q; 1821e705c121SKalle Valo 1822e705c121SKalle Valo if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), 1823e705c121SKalle Valo "TX on unused queue %d\n", txq_id)) 1824e705c121SKalle Valo return -EINVAL; 1825e705c121SKalle Valo 1826e705c121SKalle Valo if (skb_is_nonlinear(skb) && 1827e705c121SKalle Valo skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && 1828e705c121SKalle Valo __skb_linearize(skb)) 1829e705c121SKalle Valo return -ENOMEM; 1830e705c121SKalle Valo 1831e705c121SKalle Valo /* mac80211 always puts the full header into the SKB's head, 1832e705c121SKalle Valo * so there's no need to check if it's readable there 1833e705c121SKalle Valo */ 1834e705c121SKalle Valo hdr = (struct ieee80211_hdr *)skb->data; 1835e705c121SKalle Valo fc = hdr->frame_control; 1836e705c121SKalle Valo hdr_len = ieee80211_hdrlen(fc); 1837e705c121SKalle Valo 1838e705c121SKalle Valo spin_lock(&txq->lock); 1839e705c121SKalle Valo 1840e705c121SKalle Valo /* In AGG mode, the index in the ring must correspond to the WiFi 1841e705c121SKalle Valo * sequence number. This is a HW requirements to help the SCD to parse 1842e705c121SKalle Valo * the BA. 1843e705c121SKalle Valo * Check here that the packets are in the right place on the ring. 1844e705c121SKalle Valo */ 1845e705c121SKalle Valo wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1846e705c121SKalle Valo WARN_ONCE(txq->ampdu && 1847e705c121SKalle Valo (wifi_seq & 0xff) != q->write_ptr, 1848e705c121SKalle Valo "Q: %d WiFi Seq %d tfdNum %d", 1849e705c121SKalle Valo txq_id, wifi_seq, q->write_ptr); 1850e705c121SKalle Valo 1851e705c121SKalle Valo /* Set up driver data for this TFD */ 1852e705c121SKalle Valo txq->entries[q->write_ptr].skb = skb; 1853e705c121SKalle Valo txq->entries[q->write_ptr].cmd = dev_cmd; 1854e705c121SKalle Valo 1855e705c121SKalle Valo dev_cmd->hdr.sequence = 1856e705c121SKalle Valo cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1857e705c121SKalle Valo INDEX_TO_SEQ(q->write_ptr))); 1858e705c121SKalle Valo 1859e705c121SKalle Valo tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); 1860e705c121SKalle Valo scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + 1861e705c121SKalle Valo offsetof(struct iwl_tx_cmd, scratch); 1862e705c121SKalle Valo 1863e705c121SKalle Valo tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1864e705c121SKalle Valo tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 1865e705c121SKalle Valo 1866e705c121SKalle Valo /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1867e705c121SKalle Valo out_meta = &txq->entries[q->write_ptr].meta; 1868e705c121SKalle Valo out_meta->flags = 0; 1869e705c121SKalle Valo 1870e705c121SKalle Valo /* 1871e705c121SKalle Valo * The second TB (tb1) points to the remainder of the TX command 1872e705c121SKalle Valo * and the 802.11 header - dword aligned size 1873e705c121SKalle Valo * (This calculation modifies the TX command, so do it before the 1874e705c121SKalle Valo * setup of the first TB) 1875e705c121SKalle Valo */ 1876e705c121SKalle Valo len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + 1877e705c121SKalle Valo hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; 1878e705c121SKalle Valo tb1_len = ALIGN(len, 4); 1879e705c121SKalle Valo 1880e705c121SKalle Valo /* Tell NIC about any 2-byte padding after MAC header */ 1881e705c121SKalle Valo if (tb1_len != len) 1882e705c121SKalle Valo tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 1883e705c121SKalle Valo 1884e705c121SKalle Valo /* The first TB points to the scratchbuf data - min_copy bytes */ 1885e705c121SKalle Valo memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, 1886e705c121SKalle Valo IWL_HCMD_SCRATCHBUF_SIZE); 1887e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 1888e705c121SKalle Valo IWL_HCMD_SCRATCHBUF_SIZE, true); 1889e705c121SKalle Valo 1890e705c121SKalle Valo /* there must be data left over for TB1 or this code must be changed */ 1891e705c121SKalle Valo BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); 1892e705c121SKalle Valo 1893e705c121SKalle Valo /* map the data for TB1 */ 1894e705c121SKalle Valo tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; 1895e705c121SKalle Valo tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 1896e705c121SKalle Valo if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 1897e705c121SKalle Valo goto out_err; 1898e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 1899e705c121SKalle Valo 1900e705c121SKalle Valo /* 1901e705c121SKalle Valo * Set up TFD's third entry to point directly to remainder 1902e705c121SKalle Valo * of skb's head, if any 1903e705c121SKalle Valo */ 1904e705c121SKalle Valo tb2_len = skb_headlen(skb) - hdr_len; 1905e705c121SKalle Valo if (tb2_len > 0) { 1906e705c121SKalle Valo dma_addr_t tb2_phys = dma_map_single(trans->dev, 1907e705c121SKalle Valo skb->data + hdr_len, 1908e705c121SKalle Valo tb2_len, DMA_TO_DEVICE); 1909e705c121SKalle Valo if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { 1910e705c121SKalle Valo iwl_pcie_tfd_unmap(trans, out_meta, 1911e705c121SKalle Valo &txq->tfds[q->write_ptr]); 1912e705c121SKalle Valo goto out_err; 1913e705c121SKalle Valo } 1914e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); 1915e705c121SKalle Valo } 1916e705c121SKalle Valo 1917e705c121SKalle Valo /* set up the remaining entries to point to the data */ 1918e705c121SKalle Valo for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1919e705c121SKalle Valo const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1920e705c121SKalle Valo dma_addr_t tb_phys; 1921e705c121SKalle Valo int tb_idx; 1922e705c121SKalle Valo 1923e705c121SKalle Valo if (!skb_frag_size(frag)) 1924e705c121SKalle Valo continue; 1925e705c121SKalle Valo 1926e705c121SKalle Valo tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 1927e705c121SKalle Valo skb_frag_size(frag), DMA_TO_DEVICE); 1928e705c121SKalle Valo 1929e705c121SKalle Valo if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 1930e705c121SKalle Valo iwl_pcie_tfd_unmap(trans, out_meta, 1931e705c121SKalle Valo &txq->tfds[q->write_ptr]); 1932e705c121SKalle Valo goto out_err; 1933e705c121SKalle Valo } 1934e705c121SKalle Valo tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 1935e705c121SKalle Valo skb_frag_size(frag), false); 1936e705c121SKalle Valo 1937e705c121SKalle Valo out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS); 1938e705c121SKalle Valo } 1939e705c121SKalle Valo 1940e705c121SKalle Valo /* Set up entry for this TFD in Tx byte-count array */ 1941e705c121SKalle Valo iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); 1942e705c121SKalle Valo 1943e705c121SKalle Valo trace_iwlwifi_dev_tx(trans->dev, skb, 1944e705c121SKalle Valo &txq->tfds[txq->q.write_ptr], 1945e705c121SKalle Valo sizeof(struct iwl_tfd), 1946e705c121SKalle Valo &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, 1947e705c121SKalle Valo skb->data + hdr_len, tb2_len); 1948e705c121SKalle Valo trace_iwlwifi_dev_tx_data(trans->dev, skb, 1949e705c121SKalle Valo hdr_len, skb->len - hdr_len); 1950e705c121SKalle Valo 1951e705c121SKalle Valo wait_write_ptr = ieee80211_has_morefrags(fc); 1952e705c121SKalle Valo 1953e705c121SKalle Valo /* start timer if queue currently empty */ 1954e705c121SKalle Valo if (q->read_ptr == q->write_ptr) { 1955e705c121SKalle Valo if (txq->wd_timeout) { 1956e705c121SKalle Valo /* 1957e705c121SKalle Valo * If the TXQ is active, then set the timer, if not, 1958e705c121SKalle Valo * set the timer in remainder so that the timer will 1959e705c121SKalle Valo * be armed with the right value when the station will 1960e705c121SKalle Valo * wake up. 1961e705c121SKalle Valo */ 1962e705c121SKalle Valo if (!txq->frozen) 1963e705c121SKalle Valo mod_timer(&txq->stuck_timer, 1964e705c121SKalle Valo jiffies + txq->wd_timeout); 1965e705c121SKalle Valo else 1966e705c121SKalle Valo txq->frozen_expiry_remainder = txq->wd_timeout; 1967e705c121SKalle Valo } 1968e705c121SKalle Valo IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); 1969e705c121SKalle Valo iwl_trans_pcie_ref(trans); 1970e705c121SKalle Valo } 1971e705c121SKalle Valo 1972e705c121SKalle Valo /* Tell device the write index *just past* this latest filled TFD */ 1973e705c121SKalle Valo q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); 1974e705c121SKalle Valo if (!wait_write_ptr) 1975e705c121SKalle Valo iwl_pcie_txq_inc_wr_ptr(trans, txq); 1976e705c121SKalle Valo 1977e705c121SKalle Valo /* 1978e705c121SKalle Valo * At this point the frame is "transmitted" successfully 1979e705c121SKalle Valo * and we will get a TX status notification eventually. 1980e705c121SKalle Valo */ 1981e705c121SKalle Valo if (iwl_queue_space(q) < q->high_mark) { 1982e705c121SKalle Valo if (wait_write_ptr) 1983e705c121SKalle Valo iwl_pcie_txq_inc_wr_ptr(trans, txq); 1984e705c121SKalle Valo else 1985e705c121SKalle Valo iwl_stop_queue(trans, txq); 1986e705c121SKalle Valo } 1987e705c121SKalle Valo spin_unlock(&txq->lock); 1988e705c121SKalle Valo return 0; 1989e705c121SKalle Valo out_err: 1990e705c121SKalle Valo spin_unlock(&txq->lock); 1991e705c121SKalle Valo return -1; 1992e705c121SKalle Valo } 1993