// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'Tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
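
/*
 * Illustrative sketch (not driver code): with the 2-entry guard described
 * above, the usable space in a power-of-2 sized circular buffer could be
 * computed as below. The helper name and the power-of-2 assumption are
 * ours, for explanation only; the real accounting lives in iwl_txq_space().
 *
 *	static int example_txq_space(int write_ptr, int read_ptr, int q_size)
 *	{
 *		int used = (write_ptr - read_ptr) & (q_size - 1);
 *
 *		return q_size - used - 2; // keep 2 entries empty
 *	}
 */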
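/*
 * iwl_pcie_alloc_dma_ptr - allocate a coherent DMA buffer and record its
 * address/size in @ptr. Returns 0 on success, -ENOMEM on allocation
 * failure, or -EINVAL if @ptr is already in use; undone by
 * iwl_pcie_free_dma_ptr().
 */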
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

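/*
 * iwl_pcie_txq_build_tfd - append one TB (DMA address + length) to the TFD
 * at the queue's current write pointer, zeroing the TFD first if @reset.
 * Returns the index of the TB just written, or -EINVAL if the TFD already
 * holds max_tbs entries or @addr has bits outside IWL_TX_DMA_MASK.
 */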
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;

	spin_lock(&trans_pcie->reg_lock);

	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
		spin_unlock(&trans_pcie->reg_lock);
		return;
	}

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	spin_unlock(&trans_pcie->reg_lock);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr &&
		    txq_id == trans->txqs.cmd.q_id)
			iwl_pcie_clear_cmd_in_flight(trans);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
			    SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];

		if (trans->trans_cfg->gen2)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_bh(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return 0;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set (see above.)
	 */
	if (!_iwl_trans_pcie_grab_nic_access(trans))
		return -EIO;

	/*
	 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
	 * There, we also returned immediately if cmd_hold_nic_awake is
	 * already true, so it's OK to unconditionally set it to true.
	 */
	trans_pcie->cmd_hold_nic_awake = true;
	spin_unlock(&trans_pcie->reg_lock);

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr)
		iwl_pcie_clear_cmd_in_flight(trans);

	iwl_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
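/* Worked example: sta_id 3, tid 5 -> BUILD_RAxTID(3, 5) == (3 << 4) + 5 == 0x35 */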

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}
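
	/*
	 * Worked example for the workaround above: with txq->write_ptr at
	 * 0x20 and ssn at 0x60, the delta is 0x40, so its low 6 bits are
	 * all zero, scd_bug is set and ssn is bumped to 0x61 to step over
	 * the value the SCD write pointer would get stuck on.
	 */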

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
				      trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
				      SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
				      SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	unsigned long flags;

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}
9852800aadcSJiri Kosina spin_lock_irqsave(&txq->lock, flags);
986e705c121SKalle Valo
9870cd1ad2dSMordechay Goodstein if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
9882800aadcSJiri Kosina spin_unlock_irqrestore(&txq->lock, flags);
989e705c121SKalle Valo
990e705c121SKalle Valo IWL_ERR(trans, "No space in command queue\n");
991e705c121SKalle Valo iwl_op_mode_cmd_queue_full(trans->op_mode);
992e705c121SKalle Valo idx = -ENOSPC;
993e705c121SKalle Valo goto free_dup_buf;
994e705c121SKalle Valo }
995e705c121SKalle Valo
9960cd1ad2dSMordechay Goodstein idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
997e705c121SKalle Valo out_cmd = txq->entries[idx].cmd;
998e705c121SKalle Valo out_meta = &txq->entries[idx].meta;
999e705c121SKalle Valo
1000e705c121SKalle Valo memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
1001e705c121SKalle Valo if (cmd->flags & CMD_WANT_SKB)
1002e705c121SKalle Valo out_meta->source = cmd;
1003e705c121SKalle Valo
1004e705c121SKalle Valo /* set up the header */
1005e705c121SKalle Valo if (group_id != 0) {
1006e705c121SKalle Valo out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1007e705c121SKalle Valo out_cmd->hdr_wide.group_id = group_id;
1008e705c121SKalle Valo out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1009e705c121SKalle Valo out_cmd->hdr_wide.length =
1010e705c121SKalle Valo cpu_to_le16(cmd_size -
1011e705c121SKalle Valo sizeof(struct iwl_cmd_header_wide));
1012e705c121SKalle Valo out_cmd->hdr_wide.reserved = 0;
1013e705c121SKalle Valo out_cmd->hdr_wide.sequence =
10144f4822b7SMordechay Goodstein cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1015bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr));
1016e705c121SKalle Valo
1017e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header_wide);
1018e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header_wide);
1019e705c121SKalle Valo } else {
1020e705c121SKalle Valo out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1021e705c121SKalle Valo out_cmd->hdr.sequence =
10224f4822b7SMordechay Goodstein cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1023bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr));
1024e705c121SKalle Valo out_cmd->hdr.group_id = 0;
1025e705c121SKalle Valo
1026e705c121SKalle Valo cmd_pos = sizeof(struct iwl_cmd_header);
1027e705c121SKalle Valo copy_size = sizeof(struct iwl_cmd_header);
1028e705c121SKalle Valo }
1029e705c121SKalle Valo
1030e705c121SKalle Valo /* and copy the data that needs to be copied */
1031e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1032e705c121SKalle Valo int copy;
1033e705c121SKalle Valo
1034e705c121SKalle Valo if (!cmd->len[i])
1035e705c121SKalle Valo continue;
1036e705c121SKalle Valo
1037e705c121SKalle Valo /* copy everything if not nocopy/dup */
1038e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1039e705c121SKalle Valo IWL_HCMD_DFL_DUP))) {
1040e705c121SKalle Valo copy = cmd->len[i];
1041e705c121SKalle Valo
1042e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1043e705c121SKalle Valo cmd_pos += copy;
1044e705c121SKalle Valo copy_size += copy;
1045e705c121SKalle Valo continue;
1046e705c121SKalle Valo }
1047e705c121SKalle Valo
1048e705c121SKalle Valo /*
10498de437c7SSara Sharon * Otherwise we need at least IWL_FIRST_TB_SIZE copied
10508de437c7SSara Sharon * in total (for bi-directional DMA), but copy up to what
1051e705c121SKalle Valo * we can fit into the payload for debug dump purposes.
1052e705c121SKalle Valo */
1053e705c121SKalle Valo copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1054e705c121SKalle Valo
1055e705c121SKalle Valo memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1056e705c121SKalle Valo cmd_pos += copy;
1057e705c121SKalle Valo
1058e705c121SKalle Valo /* However, treat copy_size the proper way, we need it below */
10598de437c7SSara Sharon if (copy_size < IWL_FIRST_TB_SIZE) {
10608de437c7SSara Sharon copy = IWL_FIRST_TB_SIZE - copy_size;
1061e705c121SKalle Valo
1062e705c121SKalle Valo if (copy > cmd->len[i])
1063e705c121SKalle Valo copy = cmd->len[i];
1064e705c121SKalle Valo copy_size += copy;
1065e705c121SKalle Valo }
1066e705c121SKalle Valo }
1067e705c121SKalle Valo
1068e705c121SKalle Valo IWL_DEBUG_HC(trans,
1069e705c121SKalle Valo "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
107039bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd->id),
1071e705c121SKalle Valo group_id, out_cmd->hdr.cmd,
1072e705c121SKalle Valo le16_to_cpu(out_cmd->hdr.sequence),
10734f4822b7SMordechay Goodstein cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
1074e705c121SKalle Valo
10758de437c7SSara Sharon /* start the TFD with the minimum copy bytes */
10768de437c7SSara Sharon tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
10778de437c7SSara Sharon memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1078e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq,
10790cd1ad2dSMordechay Goodstein iwl_txq_get_first_tb_dma(txq, idx),
10808de437c7SSara Sharon tb0_size, true);
1081e705c121SKalle Valo
1082e705c121SKalle Valo /* map first command fragment, if any remains */
10838de437c7SSara Sharon if (copy_size > tb0_size) {
1084e705c121SKalle Valo phys_addr = dma_map_single(trans->dev,
10858de437c7SSara Sharon ((u8 *)&out_cmd->hdr) + tb0_size,
10868de437c7SSara Sharon copy_size - tb0_size,
1087e705c121SKalle Valo DMA_TO_DEVICE);
1088e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) {
10890179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1090bb98ecd4SSara Sharon txq->write_ptr);
1091e705c121SKalle Valo idx = -ENOMEM;
1092e705c121SKalle Valo goto out;
1093e705c121SKalle Valo }
1094e705c121SKalle Valo
1095e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
10968de437c7SSara Sharon copy_size - tb0_size, false);
1097e705c121SKalle Valo }
1098e705c121SKalle Valo
1099e705c121SKalle Valo /* map the remaining (adjusted) nocopy/dup fragments */
1100e705c121SKalle Valo for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
11010301bcd5SBjoern A. Zeeb void *data = (void *)(uintptr_t)cmddata[i];
1102e705c121SKalle Valo
1103e705c121SKalle Valo if (!cmdlen[i])
1104e705c121SKalle Valo continue;
1105e705c121SKalle Valo if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1106e705c121SKalle Valo IWL_HCMD_DFL_DUP)))
1107e705c121SKalle Valo continue;
1108e705c121SKalle Valo if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1109e705c121SKalle Valo data = dup_buf;
11100301bcd5SBjoern A. Zeeb phys_addr = dma_map_single(trans->dev, data,
1111e705c121SKalle Valo cmdlen[i], DMA_TO_DEVICE);
1112e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys_addr)) {
11130179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1114bb98ecd4SSara Sharon txq->write_ptr);
1115e705c121SKalle Valo idx = -ENOMEM;
1116e705c121SKalle Valo goto out;
1117e705c121SKalle Valo }
1118e705c121SKalle Valo
1119e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1120e705c121SKalle Valo }
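/*
 * NOCOPY fragments are mapped in place from the caller's buffers;
 * a DUP fragment is mapped from dup_buf instead. Ownership of
 * dup_buf passes to the queue entry (free_buf) below, so it is
 * only kfree()d here on the error paths.
 */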
1121e705c121SKalle Valo
11223cd1980bSSara Sharon BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1123e705c121SKalle Valo out_meta->flags = cmd->flags;
1124e705c121SKalle Valo if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1125453431a5SWaiman Long kfree_sensitive(txq->entries[idx].free_buf);
1126e705c121SKalle Valo txq->entries[idx].free_buf = dup_buf;
1127e705c121SKalle Valo
1128e705c121SKalle Valo trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1129e705c121SKalle Valo
1130e705c121SKalle Valo /* start timer if queue currently empty */
1131bb98ecd4SSara Sharon if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1132e705c121SKalle Valo mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1133e705c121SKalle Valo
1134e705c121SKalle Valo ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1135e705c121SKalle Valo if (ret < 0) {
1136e705c121SKalle Valo idx = ret;
113772bc934cSJohannes Berg goto out;
1138e705c121SKalle Valo }
1139e705c121SKalle Valo
1140e705c121SKalle Valo /* Increment and update queue's write index */
11410cd1ad2dSMordechay Goodstein txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1142e705c121SKalle Valo iwl_pcie_txq_inc_wr_ptr(trans, txq);
1143e705c121SKalle Valo
1144e705c121SKalle Valo out:
11452800aadcSJiri Kosina spin_unlock_irqrestore(&txq->lock, flags);
1146e705c121SKalle Valo free_dup_buf:
1147e705c121SKalle Valo if (idx < 0)
1148e705c121SKalle Valo kfree(dup_buf);
1149e705c121SKalle Valo return idx;
1150e705c121SKalle Valo }
1151e705c121SKalle Valo
1152e705c121SKalle Valo /*
1153e705c121SKalle Valo * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1154e705c121SKalle Valo * @trans: the transport
* @rxb: Rx buffer to reclaim
1155e705c121SKalle Valo */
1156e705c121SKalle Valo void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1157e705c121SKalle Valo struct iwl_rx_cmd_buffer *rxb)
1158e705c121SKalle Valo {
1159e705c121SKalle Valo struct iwl_rx_packet *pkt = rxb_addr(rxb);
1160e705c121SKalle Valo u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1161d490e097SJohannes Berg u8 group_id;
116239bdb17eSSharon Dvir u32 cmd_id;
1163e705c121SKalle Valo int txq_id = SEQ_TO_QUEUE(sequence);
1164e705c121SKalle Valo int index = SEQ_TO_INDEX(sequence);
1165e705c121SKalle Valo int cmd_index;
1166e705c121SKalle Valo struct iwl_device_cmd *cmd;
1167e705c121SKalle Valo struct iwl_cmd_meta *meta;
1168e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
11694f4822b7SMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1170e705c121SKalle Valo
1171e705c121SKalle Valo /* If a Tx command is being handled and it isn't in the actual
1172e705c121SKalle Valo * command queue then a command routing bug has been introduced
1173e705c121SKalle Valo * in the queue management code. */
11744f4822b7SMordechay Goodstein if (WARN(txq_id != trans->txqs.cmd.q_id,
1175e705c121SKalle Valo "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
11764f4822b7SMordechay Goodstein txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
1177b2a3b1c1SSara Sharon txq->write_ptr)) {
1178e705c121SKalle Valo iwl_print_hex_error(trans, pkt, 32);
1179e705c121SKalle Valo return;
1180e705c121SKalle Valo }
1181e705c121SKalle Valo
1182e705c121SKalle Valo spin_lock_bh(&txq->lock);
1183e705c121SKalle Valo
11840cd1ad2dSMordechay Goodstein cmd_index = iwl_txq_get_cmd_index(txq, index);
1185e705c121SKalle Valo cmd = txq->entries[cmd_index].cmd;
1186e705c121SKalle Valo meta = &txq->entries[cmd_index].meta;
1187d490e097SJohannes Berg group_id = cmd->hdr.group_id;
1188f0c86427SJohannes Berg cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
1189e705c121SKalle Valo
1190a0632004SJohannes Berg if (trans->trans_cfg->gen2)
1191a0632004SJohannes Berg iwl_txq_gen2_tfd_unmap(trans, meta,
1192a0632004SJohannes Berg iwl_txq_get_tfd(trans, txq, index));
1193a0632004SJohannes Berg else
11940179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
1195e705c121SKalle Valo
1196e705c121SKalle Valo /* Input error checking is done when commands are added to the queue. */
1197e705c121SKalle Valo if (meta->flags & CMD_WANT_SKB) {
1198e705c121SKalle Valo struct page *p = rxb_steal_page(rxb);
1199e705c121SKalle Valo
1200e705c121SKalle Valo meta->source->resp_pkt = pkt;
1201e705c121SKalle Valo meta->source->_rx_page_addr = (unsigned long)page_address(p);
1202e705c121SKalle Valo meta->source->_rx_page_order = trans_pcie->rx_page_order;
1203e705c121SKalle Valo }
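/*
 * The page was stolen from the rxb, so the Rx path won't recycle
 * it; the command issuer now owns resp_pkt and is expected to
 * release the page (e.g. via iwl_free_resp()).
 */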
1204e705c121SKalle Valo
1205dcbb4746SEmmanuel Grumbach if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
1206dcbb4746SEmmanuel Grumbach iwl_op_mode_async_cb(trans->op_mode, cmd);
1207dcbb4746SEmmanuel Grumbach
1208e705c121SKalle Valo iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1209e705c121SKalle Valo
1210e705c121SKalle Valo if (!(meta->flags & CMD_ASYNC)) {
1211e705c121SKalle Valo if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1212e705c121SKalle Valo IWL_WARN(trans,
1213e705c121SKalle Valo "HCMD_ACTIVE already clear for command %s\n",
121439bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id));
1215e705c121SKalle Valo }
1216e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1217e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
121839bdb17eSSharon Dvir iwl_get_cmd_string(trans, cmd_id));
121913f028b4SMordechay Goodstein wake_up(&trans->wait_command_queue);
1220e705c121SKalle Valo }
1221e705c121SKalle Valo
1222e705c121SKalle Valo meta->flags = 0;
1223e705c121SKalle Valo
1224e705c121SKalle Valo spin_unlock_bh(&txq->lock);
1225e705c121SKalle Valo }
1226e705c121SKalle Valo
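/*
 * iwl_fill_data_tbs - map an skb's payload as TBs of the current TFD
 *
 * Maps whatever part of the skb head follows the 802.11 header, plus
 * every page fragment, as additional TBs. In the non-A-MSDU path below
 * this is called for the main skb and again for each frag-list entry.
 */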
12273a0b2a42SEmmanuel Grumbach static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
12283a0b2a42SEmmanuel Grumbach struct iwl_txq *txq, u8 hdr_len,
1229bb03927eSJohannes Berg struct iwl_cmd_meta *out_meta)
12303a0b2a42SEmmanuel Grumbach {
1231bb03927eSJohannes Berg u16 head_tb_len;
12323a0b2a42SEmmanuel Grumbach int i;
12333a0b2a42SEmmanuel Grumbach
12343a0b2a42SEmmanuel Grumbach /*
12353a0b2a42SEmmanuel Grumbach * Set up TFD's third entry to point directly to remainder
12363a0b2a42SEmmanuel Grumbach * of skb's head, if any
12373a0b2a42SEmmanuel Grumbach */
1238bb03927eSJohannes Berg head_tb_len = skb_headlen(skb) - hdr_len;
12393a0b2a42SEmmanuel Grumbach
1240bb03927eSJohannes Berg if (head_tb_len > 0) {
1241bb03927eSJohannes Berg dma_addr_t tb_phys = dma_map_single(trans->dev,
12423a0b2a42SEmmanuel Grumbach skb->data + hdr_len,
1243bb03927eSJohannes Berg head_tb_len, DMA_TO_DEVICE);
1244bb03927eSJohannes Berg if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
12453a0b2a42SEmmanuel Grumbach return -EINVAL;
12469b08ae22SJohannes Berg trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
12479b08ae22SJohannes Berg tb_phys, head_tb_len);
1248bb03927eSJohannes Berg iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
12493a0b2a42SEmmanuel Grumbach }
12503a0b2a42SEmmanuel Grumbach
12513a0b2a42SEmmanuel Grumbach /* set up the remaining entries to point to the data */
12523a0b2a42SEmmanuel Grumbach for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12533a0b2a42SEmmanuel Grumbach const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12543a0b2a42SEmmanuel Grumbach dma_addr_t tb_phys;
12553a0b2a42SEmmanuel Grumbach int tb_idx;
12563a0b2a42SEmmanuel Grumbach
12573a0b2a42SEmmanuel Grumbach if (!skb_frag_size(frag))
12583a0b2a42SEmmanuel Grumbach continue;
12593a0b2a42SEmmanuel Grumbach
12603a0b2a42SEmmanuel Grumbach tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
12613a0b2a42SEmmanuel Grumbach skb_frag_size(frag), DMA_TO_DEVICE);
12623a0b2a42SEmmanuel Grumbach
12637d50d76eSJohannes Berg if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
12643a0b2a42SEmmanuel Grumbach return -EINVAL;
12659b08ae22SJohannes Berg trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
12669b08ae22SJohannes Berg tb_phys, skb_frag_size(frag));
12673a0b2a42SEmmanuel Grumbach tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
12683a0b2a42SEmmanuel Grumbach skb_frag_size(frag), false);
12696e00a237SJohannes Berg if (tb_idx < 0)
12706e00a237SJohannes Berg return tb_idx;
12713a0b2a42SEmmanuel Grumbach
12723cd1980bSSara Sharon out_meta->tbs |= BIT(tb_idx);
12733a0b2a42SEmmanuel Grumbach }
12743a0b2a42SEmmanuel Grumbach
12753a0b2a42SEmmanuel Grumbach return 0;
12763a0b2a42SEmmanuel Grumbach }
12773a0b2a42SEmmanuel Grumbach
12786eb5e529SEmmanuel Grumbach #ifdef CONFIG_INET
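/*
 * iwl_fill_data_tbs_amsdu - build A-MSDU subframes with the TSO core
 *
 * Splits the payload into gso_size-sized subframes. Each subframe's
 * padding, ethernet-style header and SNAP/IP/TCP headers are built in
 * a driver-managed header page (get_page_hdr()) and mapped as their
 * own TB, interleaved with TBs pointing at the payload chunks.
 */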
1279066fd29aSSara Sharon static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
12806eb5e529SEmmanuel Grumbach struct iwl_txq *txq, u8 hdr_len,
12816eb5e529SEmmanuel Grumbach struct iwl_cmd_meta *out_meta,
1282a89c72ffSJohannes Berg struct iwl_device_tx_cmd *dev_cmd,
1283a89c72ffSJohannes Berg u16 tb1_len)
12846eb5e529SEmmanuel Grumbach {
128505e5a7e5SJohannes Berg struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
12866eb5e529SEmmanuel Grumbach struct ieee80211_hdr *hdr = (void *)skb->data;
12876eb5e529SEmmanuel Grumbach unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
12886eb5e529SEmmanuel Grumbach unsigned int mss = skb_shinfo(skb)->gso_size;
12896eb5e529SEmmanuel Grumbach u16 length, iv_len, amsdu_pad;
12906eb5e529SEmmanuel Grumbach u8 *start_hdr;
12916eb5e529SEmmanuel Grumbach struct iwl_tso_hdr_page *hdr_page;
12926eb5e529SEmmanuel Grumbach struct tso_t tso;
12936eb5e529SEmmanuel Grumbach
12946eb5e529SEmmanuel Grumbach /* if the packet is protected, then it must be CCMP or GCMP */
12956eb5e529SEmmanuel Grumbach BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
12966eb5e529SEmmanuel Grumbach iv_len = ieee80211_has_protected(hdr->frame_control) ?
12976eb5e529SEmmanuel Grumbach IEEE80211_CCMP_HDR_LEN : 0;
12986eb5e529SEmmanuel Grumbach
12996eb5e529SEmmanuel Grumbach trace_iwlwifi_dev_tx(trans->dev, skb,
13000cd1ad2dSMordechay Goodstein iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1301885375d0SMordechay Goodstein trans->txqs.tfd.size,
13028790fce4SJohannes Berg &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
13036eb5e529SEmmanuel Grumbach
13046eb5e529SEmmanuel Grumbach ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
13056eb5e529SEmmanuel Grumbach snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
13066eb5e529SEmmanuel Grumbach total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
13076eb5e529SEmmanuel Grumbach amsdu_pad = 0;
13086eb5e529SEmmanuel Grumbach
13096eb5e529SEmmanuel Grumbach /* total amount of header we may need for this A-MSDU */
13106eb5e529SEmmanuel Grumbach hdr_room = DIV_ROUND_UP(total_len, mss) *
13116eb5e529SEmmanuel Grumbach (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
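/*
 * Worked example (hypothetical IPv4/TCP frame, no options):
 * snap_ip_tcp_hdrlen = 8 + 20 + 20 = 48, so each subframe may need up
 * to 3 (worst-case pad) + 48 + 14 (ethhdr) = 65 bytes of headers.
 * With total_len = 11000 and mss = 2800 that is
 * DIV_ROUND_UP(11000, 2800) = 4 subframes, i.e. 4 * 65 + iv_len bytes
 * of header room.
 */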
13126eb5e529SEmmanuel Grumbach
13136eb5e529SEmmanuel Grumbach /* Our device supports at most 9 segments, so they will fit in one page */
13147b02bf61SJohannes Berg hdr_page = get_page_hdr(trans, hdr_room, skb);
13156eb5e529SEmmanuel Grumbach if (!hdr_page)
13166eb5e529SEmmanuel Grumbach return -ENOMEM;
13176eb5e529SEmmanuel Grumbach
13186eb5e529SEmmanuel Grumbach start_hdr = hdr_page->pos;
13196eb5e529SEmmanuel Grumbach memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
13206eb5e529SEmmanuel Grumbach hdr_page->pos += iv_len;
13216eb5e529SEmmanuel Grumbach
13226eb5e529SEmmanuel Grumbach /*
13236eb5e529SEmmanuel Grumbach * Pull the ieee80211 header + IV to be able to use TSO core,
13246eb5e529SEmmanuel Grumbach * we will restore it for the tx_status flow.
13256eb5e529SEmmanuel Grumbach */
13266eb5e529SEmmanuel Grumbach skb_pull(skb, hdr_len + iv_len);
13276eb5e529SEmmanuel Grumbach
132805e5a7e5SJohannes Berg /*
132905e5a7e5SJohannes Berg * Remove the length of all the headers that we don't actually
133005e5a7e5SJohannes Berg * have in the MPDU by themselves, but that we duplicate into
133105e5a7e5SJohannes Berg * all the different MSDUs inside the A-MSDU.
133205e5a7e5SJohannes Berg */
133305e5a7e5SJohannes Berg le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
133405e5a7e5SJohannes Berg
13356eb5e529SEmmanuel Grumbach tso_start(skb, &tso);
13366eb5e529SEmmanuel Grumbach
13376eb5e529SEmmanuel Grumbach while (total_len) {
13386eb5e529SEmmanuel Grumbach /* this is the data left for this subframe */
13396eb5e529SEmmanuel Grumbach unsigned int data_left =
13406eb5e529SEmmanuel Grumbach min_t(unsigned int, mss, total_len);
13416eb5e529SEmmanuel Grumbach unsigned int hdr_tb_len;
13426eb5e529SEmmanuel Grumbach dma_addr_t hdr_tb_phys;
134359fa61f3SEmmanuel Grumbach u8 *subf_hdrs_start = hdr_page->pos;
13446eb5e529SEmmanuel Grumbach
13456eb5e529SEmmanuel Grumbach total_len -= data_left;
13466eb5e529SEmmanuel Grumbach
13476eb5e529SEmmanuel Grumbach memset(hdr_page->pos, 0, amsdu_pad);
13486eb5e529SEmmanuel Grumbach hdr_page->pos += amsdu_pad;
13496eb5e529SEmmanuel Grumbach amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
13506eb5e529SEmmanuel Grumbach data_left)) & 0x3;
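/*
 * Example with the hypothetical sizes above: a subframe of
 * 14 (ethhdr) + 48 (SNAP/IP/TCP) + 2800 (data) = 2862 bytes ends
 * 2 short of a dword boundary, so amsdu_pad = (4 - 2862) & 0x3 = 2
 * zero bytes are written before the *next* subframe (memset above).
 */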
13516eb5e529SEmmanuel Grumbach ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
13526eb5e529SEmmanuel Grumbach hdr_page->pos += ETH_ALEN;
13536eb5e529SEmmanuel Grumbach ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
13546eb5e529SEmmanuel Grumbach hdr_page->pos += ETH_ALEN;
13556eb5e529SEmmanuel Grumbach
13566eb5e529SEmmanuel Grumbach length = snap_ip_tcp_hdrlen + data_left;
13576eb5e529SEmmanuel Grumbach *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
13586eb5e529SEmmanuel Grumbach hdr_page->pos += sizeof(length);
13596eb5e529SEmmanuel Grumbach
13606eb5e529SEmmanuel Grumbach /*
13616eb5e529SEmmanuel Grumbach * This also copies the SNAP header, which will be
13626eb5e529SEmmanuel Grumbach * considered part of the MAC header.
13636eb5e529SEmmanuel Grumbach */
13646eb5e529SEmmanuel Grumbach tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
13656eb5e529SEmmanuel Grumbach
13666eb5e529SEmmanuel Grumbach hdr_page->pos += snap_ip_tcp_hdrlen;
13676eb5e529SEmmanuel Grumbach
13686eb5e529SEmmanuel Grumbach hdr_tb_len = hdr_page->pos - start_hdr;
13696eb5e529SEmmanuel Grumbach hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
13706eb5e529SEmmanuel Grumbach hdr_tb_len, DMA_TO_DEVICE);
1371fb54b863SJohannes Berg if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
13727d50d76eSJohannes Berg return -EINVAL;
13736eb5e529SEmmanuel Grumbach iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
13746eb5e529SEmmanuel Grumbach hdr_tb_len, false);
1375bf77ee2eSSara Sharon trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
13769b08ae22SJohannes Berg hdr_tb_phys, hdr_tb_len);
137705e5a7e5SJohannes Berg /* add this subframe's headers' length to the tx_cmd */
137805e5a7e5SJohannes Berg le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
13796eb5e529SEmmanuel Grumbach
13806eb5e529SEmmanuel Grumbach /* prepare the start_hdr for the next subframe */
13816eb5e529SEmmanuel Grumbach start_hdr = hdr_page->pos;
13826eb5e529SEmmanuel Grumbach
13836eb5e529SEmmanuel Grumbach /* put the payload */
13846eb5e529SEmmanuel Grumbach while (data_left) {
13856eb5e529SEmmanuel Grumbach unsigned int size = min_t(unsigned int, tso.size,
13866eb5e529SEmmanuel Grumbach data_left);
13876eb5e529SEmmanuel Grumbach dma_addr_t tb_phys;
13886eb5e529SEmmanuel Grumbach
13896eb5e529SEmmanuel Grumbach tb_phys = dma_map_single(trans->dev, tso.data,
13906eb5e529SEmmanuel Grumbach size, DMA_TO_DEVICE);
1391fb54b863SJohannes Berg if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
13927d50d76eSJohannes Berg return -EINVAL;
13936eb5e529SEmmanuel Grumbach
13946eb5e529SEmmanuel Grumbach iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
13956eb5e529SEmmanuel Grumbach size, false);
1396bf77ee2eSSara Sharon trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
13979b08ae22SJohannes Berg tb_phys, size);
13986eb5e529SEmmanuel Grumbach
13996eb5e529SEmmanuel Grumbach data_left -= size;
14006eb5e529SEmmanuel Grumbach tso_build_data(skb, &tso, size);
14016eb5e529SEmmanuel Grumbach }
14026eb5e529SEmmanuel Grumbach }
14036eb5e529SEmmanuel Grumbach
14046eb5e529SEmmanuel Grumbach /* re-add the WiFi header and IV */
14056eb5e529SEmmanuel Grumbach skb_push(skb, hdr_len + iv_len);
14066eb5e529SEmmanuel Grumbach
14076eb5e529SEmmanuel Grumbach return 0;
14086eb5e529SEmmanuel Grumbach }
14096eb5e529SEmmanuel Grumbach #else /* CONFIG_INET */
14106eb5e529SEmmanuel Grumbach static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
14116eb5e529SEmmanuel Grumbach struct iwl_txq *txq, u8 hdr_len,
14126eb5e529SEmmanuel Grumbach struct iwl_cmd_meta *out_meta,
1413a89c72ffSJohannes Berg struct iwl_device_tx_cmd *dev_cmd,
1414a89c72ffSJohannes Berg u16 tb1_len)
14156eb5e529SEmmanuel Grumbach {
14166eb5e529SEmmanuel Grumbach /* No A-MSDU without CONFIG_INET */
14176eb5e529SEmmanuel Grumbach WARN_ON(1);
14186eb5e529SEmmanuel Grumbach
14196eb5e529SEmmanuel Grumbach return -1;
14206eb5e529SEmmanuel Grumbach }
14216eb5e529SEmmanuel Grumbach #endif /* CONFIG_INET */
14226eb5e529SEmmanuel Grumbach
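/*
 * iwl_trans_pcie_tx - TX entry point for the gen1 TFD format
 *
 * TFD layout built below: TB0 is the IWL_FIRST_TB_SIZE-byte
 * bi-directional scratch buffer copied from the device command, TB1
 * maps the rest of the TX command plus the 802.11 header, and any
 * further TBs map the payload (or, for TSO, the A-MSDU subframes).
 */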
1423e705c121SKalle Valo int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1424a89c72ffSJohannes Berg struct iwl_device_tx_cmd *dev_cmd, int txq_id)
1425e705c121SKalle Valo {
1426e705c121SKalle Valo struct ieee80211_hdr *hdr;
1427e705c121SKalle Valo struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
1428e705c121SKalle Valo struct iwl_cmd_meta *out_meta;
1429e705c121SKalle Valo struct iwl_txq *txq;
1430e705c121SKalle Valo dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1431e705c121SKalle Valo void *tb1_addr;
14324fe10bc6SSara Sharon void *tfd;
14333a0b2a42SEmmanuel Grumbach u16 len, tb1_len;
1434e705c121SKalle Valo bool wait_write_ptr;
1435e705c121SKalle Valo __le16 fc;
1436e705c121SKalle Valo u8 hdr_len;
1437e705c121SKalle Valo u16 wifi_seq;
1438c772a3d3SSara Sharon bool amsdu;
1439e705c121SKalle Valo
14404f4822b7SMordechay Goodstein txq = trans->txqs.txq[txq_id];
1441e705c121SKalle Valo
14424f4822b7SMordechay Goodstein if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
1443e705c121SKalle Valo "TX on unused queue %d\n", txq_id))
1444e705c121SKalle Valo return -EINVAL;
1445e705c121SKalle Valo
1446e705c121SKalle Valo if (skb_is_nonlinear(skb) &&
1447885375d0SMordechay Goodstein skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
1448e705c121SKalle Valo __skb_linearize(skb))
1449e705c121SKalle Valo return -ENOMEM;
1450e705c121SKalle Valo
1451e705c121SKalle Valo /* mac80211 always puts the full header into the SKB's head,
1452e705c121SKalle Valo * so there's no need to check if it's readable there
1453e705c121SKalle Valo */
1454e705c121SKalle Valo hdr = (struct ieee80211_hdr *)skb->data;
1455e705c121SKalle Valo fc = hdr->frame_control;
1456e705c121SKalle Valo hdr_len = ieee80211_hdrlen(fc);
1457e705c121SKalle Valo
1458e705c121SKalle Valo spin_lock(&txq->lock);
1459e705c121SKalle Valo
14600cd1ad2dSMordechay Goodstein if (iwl_txq_space(trans, txq) < txq->high_mark) {
14610cd1ad2dSMordechay Goodstein iwl_txq_stop(trans, txq);
14623955525dSEmmanuel Grumbach
14633955525dSEmmanuel Grumbach /* don't put the packet on the ring if there is no room */
14640cd1ad2dSMordechay Goodstein if (unlikely(iwl_txq_space(trans, txq) < 3)) {
1465a89c72ffSJohannes Berg struct iwl_device_tx_cmd **dev_cmd_ptr;
14663955525dSEmmanuel Grumbach
146721cb3222SJohannes Berg dev_cmd_ptr = (void *)((u8 *)skb->cb +
146822852fadSMordechay Goodstein trans->txqs.dev_cmd_offs);
146921cb3222SJohannes Berg
147021cb3222SJohannes Berg *dev_cmd_ptr = dev_cmd;
14713955525dSEmmanuel Grumbach __skb_queue_tail(&txq->overflow_q, skb);
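/*
 * The frame isn't dropped: the reclaim flow drains overflow_q and
 * TXes the deferred frames once space on the ring frees up again.
 */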
14723955525dSEmmanuel Grumbach
14733955525dSEmmanuel Grumbach spin_unlock(&txq->lock);
14743955525dSEmmanuel Grumbach return 0;
14753955525dSEmmanuel Grumbach }
14763955525dSEmmanuel Grumbach }
14773955525dSEmmanuel Grumbach
1478e705c121SKalle Valo /* In AGG mode, the index in the ring must correspond to the WiFi
1479e705c121SKalle Valo * sequence number. This is a HW requirement that helps the SCD to
1480e705c121SKalle Valo * parse the BA.
1481e705c121SKalle Valo * Check here that the packets are in the right place on the ring.
1482e705c121SKalle Valo */
1483e705c121SKalle Valo wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1484e705c121SKalle Valo WARN_ONCE(txq->ampdu &&
1485bb98ecd4SSara Sharon (wifi_seq & 0xff) != txq->write_ptr,
1486e705c121SKalle Valo "Q: %d WiFi Seq %d tfdNum %d",
1487bb98ecd4SSara Sharon txq_id, wifi_seq, txq->write_ptr);
1488e705c121SKalle Valo
1489e705c121SKalle Valo /* Set up driver data for this TFD */
1490bb98ecd4SSara Sharon txq->entries[txq->write_ptr].skb = skb;
1491bb98ecd4SSara Sharon txq->entries[txq->write_ptr].cmd = dev_cmd;
1492e705c121SKalle Valo
1493e705c121SKalle Valo dev_cmd->hdr.sequence =
1494e705c121SKalle Valo cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1495bb98ecd4SSara Sharon INDEX_TO_SEQ(txq->write_ptr)));
1496e705c121SKalle Valo
14970cd1ad2dSMordechay Goodstein tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
1498e705c121SKalle Valo scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
1499e705c121SKalle Valo offsetof(struct iwl_tx_cmd, scratch);
1500e705c121SKalle Valo
1501e705c121SKalle Valo tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1502e705c121SKalle Valo tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1503e705c121SKalle Valo
1504e705c121SKalle Valo /* Set up first empty entry in queue's array of Tx/cmd buffers */
1505bb98ecd4SSara Sharon out_meta = &txq->entries[txq->write_ptr].meta;
1506e705c121SKalle Valo out_meta->flags = 0;
1507e705c121SKalle Valo
1508e705c121SKalle Valo /*
1509e705c121SKalle Valo * The second TB (tb1) points to the remainder of the TX command
1510e705c121SKalle Valo * and the 802.11 header - dword aligned size
1511e705c121SKalle Valo * (This calculation modifies the TX command, so do it before the
1512e705c121SKalle Valo * setup of the first TB)
1513e705c121SKalle Valo */
1514e705c121SKalle Valo len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
15158de437c7SSara Sharon hdr_len - IWL_FIRST_TB_SIZE;
1516c772a3d3SSara Sharon /* do not align A-MSDU to dword as the subframe header aligns it */
1517c772a3d3SSara Sharon amsdu = ieee80211_is_data_qos(fc) &&
1518c772a3d3SSara Sharon (*ieee80211_get_qos_ctl(hdr) &
1519c772a3d3SSara Sharon IEEE80211_QOS_CTL_A_MSDU_PRESENT);
152059fa61f3SEmmanuel Grumbach if (!amsdu) {
1521e705c121SKalle Valo tb1_len = ALIGN(len, 4);
1522e705c121SKalle Valo /* Tell NIC about any 2-byte padding after MAC header */
1523e705c121SKalle Valo if (tb1_len != len)
1524d172a5efSJohannes Berg tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
1525c772a3d3SSara Sharon } else {
1526c772a3d3SSara Sharon tb1_len = len;
1527c772a3d3SSara Sharon }
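/*
 * A sketch of the arithmetic, assuming the fixed structures above are
 * dword-sized: len then has the same alignment as hdr_len, so e.g. a
 * 26-byte QoS header leaves len 2 bytes short of a dword boundary;
 * tb1_len = len + 2 and TX_CMD_FLG_MH_PAD tells the device to skip
 * those 2 pad bytes between the MAC header and the payload.
 */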
1528e705c121SKalle Valo
152905e5a7e5SJohannes Berg /*
153005e5a7e5SJohannes Berg * The first TB points to bi-directional DMA data, we'll
153105e5a7e5SJohannes Berg * memcpy the data into it later.
153205e5a7e5SJohannes Berg */
1533e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
15348de437c7SSara Sharon IWL_FIRST_TB_SIZE, true);
1535e705c121SKalle Valo
1536e705c121SKalle Valo /* there must be data left over for TB1 or this code must be changed */
15378de437c7SSara Sharon BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
15381caa3a5eSJohannes Berg BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
15391caa3a5eSJohannes Berg offsetofend(struct iwl_tx_cmd, scratch) >
15401caa3a5eSJohannes Berg IWL_FIRST_TB_SIZE);
1541e705c121SKalle Valo
1542e705c121SKalle Valo /* map the data for TB1 */
15438de437c7SSara Sharon tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
1544e705c121SKalle Valo tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
1545e705c121SKalle Valo if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
1546e705c121SKalle Valo goto out_err;
1547e705c121SKalle Valo iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
1548e705c121SKalle Valo
1549bf77ee2eSSara Sharon trace_iwlwifi_dev_tx(trans->dev, skb,
15500cd1ad2dSMordechay Goodstein iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1551885375d0SMordechay Goodstein trans->txqs.tfd.size,
1552bf77ee2eSSara Sharon &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
1553bf77ee2eSSara Sharon hdr_len);
1554bf77ee2eSSara Sharon
1555bf1ad897SEliad Peller /*
1556bf1ad897SEliad Peller * If gso_size wasn't set, don't give the frame "amsdu treatment"
1557bf1ad897SEliad Peller * (adding subframes, etc.).
1558bf1ad897SEliad Peller * This can happen in some testing flows when the amsdu was already
1559bf1ad897SEliad Peller * pre-built, and we just need to send the resulting skb.
1560bf1ad897SEliad Peller */
1561bf1ad897SEliad Peller if (amsdu && skb_shinfo(skb)->gso_size) {
15626eb5e529SEmmanuel Grumbach if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
15636eb5e529SEmmanuel Grumbach out_meta, dev_cmd,
15646eb5e529SEmmanuel Grumbach tb1_len)))
1565e705c121SKalle Valo goto out_err;
1566bb03927eSJohannes Berg } else {
15670044f171SJohannes Berg struct sk_buff *frag;
15680044f171SJohannes Berg
1569bb03927eSJohannes Berg if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
1570bb03927eSJohannes Berg out_meta)))
15716eb5e529SEmmanuel Grumbach goto out_err;
1572bb03927eSJohannes Berg
15730044f171SJohannes Berg skb_walk_frags(skb, frag) {
15740044f171SJohannes Berg if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
15750044f171SJohannes Berg out_meta)))
15760044f171SJohannes Berg goto out_err;
15770044f171SJohannes Berg }
15786eb5e529SEmmanuel Grumbach }
1579e705c121SKalle Valo
158005e5a7e5SJohannes Berg /* building the A-MSDU might have changed this data, so memcpy it now */
1581c1f33442SLiad Kaufman memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
158205e5a7e5SJohannes Berg
15830cd1ad2dSMordechay Goodstein tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
1584e705c121SKalle Valo /* Set up entry for this TFD in Tx byte-count array */
15850179bfffSMordechay Goodstein iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
15860179bfffSMordechay Goodstein iwl_txq_gen1_tfd_get_num_tbs(trans,
15870179bfffSMordechay Goodstein tfd));
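/*
 * The scheduler reads the byte-count table from DRAM, so the entry for
 * this TFD must hold the final frame length (which the A-MSDU path may
 * have adjusted via tx_cmd->len) before the write pointer moves.
 */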
1588e705c121SKalle Valo
1589e705c121SKalle Valo wait_write_ptr = ieee80211_has_morefrags(fc);
1590e705c121SKalle Valo
1591e705c121SKalle Valo /* start timer if queue currently empty */
15920d52497aSEmmanuel Grumbach if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
1593e705c121SKalle Valo /*
1594e705c121SKalle Valo * If the TXQ is active, then set the timer, if not,
1595e705c121SKalle Valo * set the timer in remainder so that the timer will
1596e705c121SKalle Valo * be armed with the right value when the station will
1597e705c121SKalle Valo * wake up.
1598e705c121SKalle Valo */
1599e705c121SKalle Valo if (!txq->frozen)
1600e705c121SKalle Valo mod_timer(&txq->stuck_timer,
1601e705c121SKalle Valo jiffies + txq->wd_timeout);
1602e705c121SKalle Valo else
1603e705c121SKalle Valo txq->frozen_expiry_remainder = txq->wd_timeout;
1604e705c121SKalle Valo }
1605e705c121SKalle Valo
1606e705c121SKalle Valo /* Tell device the write index *just past* this latest filled TFD */
16070cd1ad2dSMordechay Goodstein txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1608e705c121SKalle Valo if (!wait_write_ptr)
1609e705c121SKalle Valo iwl_pcie_txq_inc_wr_ptr(trans, txq);
1610e705c121SKalle Valo
1611e705c121SKalle Valo /*
1612e705c121SKalle Valo * At this point the frame is "transmitted" successfully
1613e705c121SKalle Valo * and we will get a TX status notification eventually.
1614e705c121SKalle Valo */
1615e705c121SKalle Valo spin_unlock(&txq->lock);
1616e705c121SKalle Valo return 0;
1617e705c121SKalle Valo out_err:
16180179bfffSMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
1619e705c121SKalle Valo spin_unlock(&txq->lock);
1620e705c121SKalle Valo return -1;
1621e705c121SKalle Valo }
1622