// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits.  If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped.  When packets are reclaimed (on the 'tx done' IRQ) and the free
 * space rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
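
/*
 * Illustrative sketch only (not used by the code below): with read and
 * write pointers R and W on a queue of N entries, roughly (W - R) mod N
 * entries are in use.  Because the driver stops the queue while at least
 * two entries are still free, W never catches up with R from behind, so
 * R == W always means "empty" and never "full".
 */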


int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up the NIC if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

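/*
 * iwl_pcie_tfd_set_tb - fill one transfer buffer (TB) entry of a legacy TFD
 *
 * Writes the DMA address and length of the buffer into TB slot @idx and
 * bumps the TFD's TB count to idx + 1.
 */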
static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}

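/*
 * iwl_pcie_txq_build_tfd - add a buffer to the TFD at the queue's write pointer
 *
 * Returns the index of the newly used TB on success, or -EINVAL if the TFD
 * already holds the maximum number of TBs or the address is badly aligned.
 */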
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

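/*
 * iwl_pcie_clear_cmd_in_flight - let the NIC sleep again after host commands
 *
 * Releases the MAC access request taken by iwl_pcie_set_cmd_in_flight() on
 * devices that need the apmg_wake_up_wa workaround.
 */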
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			spin_lock(&trans_pcie->reg_lock);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock(&trans_pcie->reg_lock);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];

		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

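/*
 * iwl_pcie_tx_stop_fh - disable the FH Tx DMA channels and wait until they
 * report idle
 */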
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/*
	 * It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail.
	 */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_bh(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

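/*
 * iwl_pcie_set_cmd_in_flight - keep the NIC awake while a host command is
 * in flight
 *
 * On devices with the apmg_wake_up_wa workaround, request MAC access and
 * poll for the MAC clock to become ready before the command is sent; the
 * request is dropped again in iwl_pcie_clear_cmd_in_flight().
 */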
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		/* BHs are also disabled due to txq->lock */
		spin_lock(&trans_pcie->reg_lock);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock(&trans_pcie->reg_lock);
	}

	iwl_txq_progress(txq);
}

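/*
 * iwl_pcie_txq_set_ratid_map - map a RA/TID pair to a Tx queue
 *
 * Updates the scheduler's translation table in SRAM; each 32-bit word holds
 * the mapping for two queues, so only the half belonging to @txq_id is
 * modified.
 */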
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

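/*
 * iwl_trans_pcie_txq_enable - configure and activate a Tx queue in the
 * scheduler
 *
 * Returns true if the SCD write-pointer bug described below was worked
 * around (ssn incremented), so that the op_mode can take it into account.
 */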
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	unsigned long flags;

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_irqsave(&txq->lock, flags);

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&txq->lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
						 INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
						 INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock(&trans_pcie->reg_lock);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		goto unlock_reg;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

 unlock_reg:
	spin_unlock(&trans_pcie->reg_lock);
 out:
	spin_unlock_irqrestore(&txq->lock, flags);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

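/*
 * iwl_fill_data_tbs - map the frame payload into the TFD
 *
 * Maps the remainder of the skb head (past the 802.11 header) and every
 * page fragment, adding one TB per mapping to the TFD at the write pointer.
 */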
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

#ifdef CONFIG_INET
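/*
 * iwl_fill_data_tbs_amsdu - build A-MSDU subframes using the TSO core
 *
 * Splits a GSO skb into MSS-sized subframes: the subframe headers are built
 * in a TSO header page and mapped as their own TBs, and the payload is
 * mapped chunk by chunk as further TBs.
 */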
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

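/*
 * iwl_trans_pcie_tx - add a frame to a Tx queue
 *
 * Builds the TFD for the frame: TB0 holds the start of the Tx command, TB1
 * the rest of the command plus the 802.11 header, and the remaining TBs map
 * the frame data (or the A-MSDU subframes).  If the queue is almost full,
 * the skb may be parked on the overflow queue instead and 0 is returned.
 */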
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}