1 /******************************************************************************
2  *
3  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5  * Copyright(c) 2016 Intel Deutschland GmbH
6  *
7  * Portions of this file are derived from the ipw3945 project, as well
8  * as portions of the ieee80211 subsystem header files.
9  *
10  * This program is free software; you can redistribute it and/or modify it
11  * under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along with
20  * this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
22  *
23  * The full GNU General Public License is included in this distribution in the
24  * file called LICENSE.
25  *
26  * Contact Information:
27  *  Intel Linux Wireless <linuxwifi@intel.com>
28  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
29  *
30  *****************************************************************************/
31 #include <linux/etherdevice.h>
32 #include <linux/ieee80211.h>
33 #include <linux/slab.h>
34 #include <linux/sched.h>
35 #include <linux/pm_runtime.h>
36 #include <net/ip6_checksum.h>
37 #include <net/tso.h>
38 
39 #include "iwl-debug.h"
40 #include "iwl-csr.h"
41 #include "iwl-prph.h"
42 #include "iwl-io.h"
43 #include "iwl-scd.h"
44 #include "iwl-op-mode.h"
45 #include "internal.h"
46 /* FIXME: need to abstract out TX command (once we know what it looks like) */
47 #include "dvm/commands.h"
48 
49 #define IWL_TX_CRC_SIZE 4
50 #define IWL_TX_DELIMITER_SIZE 4
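/*
 * These presumably account for the 4-byte FCS appended by the HW and the
 * 4-byte frame delimiter; both are added to the byte counts reported to the
 * scheduler in iwl_pcie_txq_update_byte_cnt_tbl() below.
 */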
51 
52 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
53  * DMA services
54  *
55  * Theory of operation
56  *
57  * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
58  * of buffer descriptors, each of which points to one or more data buffers for
59  * the device to read from or fill.  Driver and device exchange status of each
60  * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
61  * entries in each circular buffer, to protect against confusing empty and full
62  * queue states.
63  *
64  * The device reads or writes the data in the queues via the device's several
65  * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
66  *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx queue
 * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
 * space becomes greater than the high mark, the Tx queue is resumed.
71  *
72  ***************************************************/
73 static int iwl_queue_space(const struct iwl_queue *q)
74 {
75 	unsigned int max;
76 	unsigned int used;
77 
78 	/*
79 	 * To avoid ambiguity between empty and completely full queues, there
80 	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
81 	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
82 	 * to reserve any queue entries for this purpose.
83 	 */
84 	if (q->n_window < TFD_QUEUE_SIZE_MAX)
85 		max = q->n_window;
86 	else
87 		max = TFD_QUEUE_SIZE_MAX - 1;
88 
89 	/*
90 	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
91 	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
92 	 */
93 	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
94 
95 	if (WARN_ON(used > max))
96 		return 0;
97 
98 	return max - used;
99 }
100 
101 /*
102  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
103  */
104 static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
105 {
106 	q->n_window = slots_num;
107 	q->id = id;
108 
109 	/* slots_num must be power-of-two size, otherwise
110 	 * get_cmd_index is broken. */
111 	if (WARN_ON(!is_power_of_2(slots_num)))
112 		return -EINVAL;
113 
114 	q->low_mark = q->n_window / 4;
115 	if (q->low_mark < 4)
116 		q->low_mark = 4;
117 
118 	q->high_mark = q->n_window / 8;
119 	if (q->high_mark < 2)
120 		q->high_mark = 2;
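	/*
	 * e.g. (illustrative) slots_num == 256 gives low_mark == 64 and
	 * high_mark == 32
	 */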
121 
122 	q->write_ptr = 0;
123 	q->read_ptr = 0;
124 
125 	return 0;
126 }
127 
128 static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
129 				  struct iwl_dma_ptr *ptr, size_t size)
130 {
131 	if (WARN_ON(ptr->addr))
132 		return -EINVAL;
133 
134 	ptr->addr = dma_alloc_coherent(trans->dev, size,
135 				       &ptr->dma, GFP_KERNEL);
136 	if (!ptr->addr)
137 		return -ENOMEM;
138 	ptr->size = size;
139 	return 0;
140 }
141 
142 static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
143 				  struct iwl_dma_ptr *ptr)
144 {
145 	if (unlikely(!ptr->addr))
146 		return;
147 
148 	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
149 	memset(ptr, 0, sizeof(*ptr));
150 }
151 
152 static void iwl_pcie_txq_stuck_timer(unsigned long data)
153 {
154 	struct iwl_txq *txq = (void *)data;
155 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
156 	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
157 	u32 scd_sram_addr = trans_pcie->scd_base_addr +
158 				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
159 	u8 buf[16];
160 	int i;
161 
162 	spin_lock(&txq->lock);
163 	/* check if triggered erroneously */
164 	if (txq->q.read_ptr == txq->q.write_ptr) {
165 		spin_unlock(&txq->lock);
166 		return;
167 	}
168 	spin_unlock(&txq->lock);
169 
170 	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
171 		jiffies_to_msecs(txq->wd_timeout));
172 	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
173 		txq->q.read_ptr, txq->q.write_ptr);
174 
175 	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
176 
177 	iwl_print_hex_error(trans, buf, sizeof(buf));
178 
179 	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
180 		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
181 			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
182 
183 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
184 		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
185 		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
186 		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
187 		u32 tbl_dw =
188 			iwl_trans_read_mem32(trans,
189 					     trans_pcie->scd_base_addr +
190 					     SCD_TRANS_TBL_OFFSET_QUEUE(i));
191 
192 		if (i & 0x1)
193 			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
194 		else
195 			tbl_dw = tbl_dw & 0x0000FFFF;
196 
197 		IWL_ERR(trans,
198 			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
199 			i, active ? "" : "in", fifo, tbl_dw,
200 			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
201 				(TFD_QUEUE_SIZE_MAX - 1),
202 			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
203 	}
204 
205 	iwl_force_nmi(trans);
206 }
207 
208 /*
209  * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
210  */
211 static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
212 					     struct iwl_txq *txq, u16 byte_cnt)
213 {
214 	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
215 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
216 	int write_ptr = txq->q.write_ptr;
217 	int txq_id = txq->q.id;
218 	u8 sec_ctl = 0;
219 	u8 sta_id = 0;
220 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
221 	__le16 bc_ent;
222 	struct iwl_tx_cmd *tx_cmd =
223 		(void *) txq->entries[txq->q.write_ptr].cmd->payload;
224 
225 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
226 
227 	sta_id = tx_cmd->sta_id;
228 	sec_ctl = tx_cmd->sec_ctl;
229 
230 	switch (sec_ctl & TX_CMD_SEC_MSK) {
231 	case TX_CMD_SEC_CCM:
232 		len += IEEE80211_CCMP_MIC_LEN;
233 		break;
234 	case TX_CMD_SEC_TKIP:
235 		len += IEEE80211_TKIP_ICV_LEN;
236 		break;
237 	case TX_CMD_SEC_WEP:
238 		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
239 		break;
240 	}
241 
242 	if (trans_pcie->bc_table_dword)
243 		len = DIV_ROUND_UP(len, 4);
244 
245 	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
246 		return;
247 
248 	bc_ent = cpu_to_le16(len | (sta_id << 12));
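	/*
	 * e.g. (illustrative) sta_id 1, len 0x70 gives bc_ent 0x1070:
	 * length in bits 11:0, station id in bits 15:12
	 */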
249 
250 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
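	/*
	 * The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end
	 * of the table, presumably so that the scheduler's read-ahead across
	 * the queue wrap point still sees valid byte counts.
	 */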
251 
252 	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
253 		scd_bc_tbl[txq_id].
254 			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
255 }
256 
257 static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
258 					    struct iwl_txq *txq)
259 {
260 	struct iwl_trans_pcie *trans_pcie =
261 		IWL_TRANS_GET_PCIE_TRANS(trans);
262 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
263 	int txq_id = txq->q.id;
264 	int read_ptr = txq->q.read_ptr;
265 	u8 sta_id = 0;
266 	__le16 bc_ent;
267 	struct iwl_tx_cmd *tx_cmd =
268 		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
269 
270 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
271 
272 	if (txq_id != trans_pcie->cmd_queue)
273 		sta_id = tx_cmd->sta_id;
274 
275 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
276 	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
277 
278 	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
279 		scd_bc_tbl[txq_id].
280 			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
281 }
282 
283 /*
284  * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
285  */
286 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
287 				    struct iwl_txq *txq)
288 {
289 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
290 	u32 reg = 0;
291 	int txq_id = txq->q.id;
292 
293 	lockdep_assert_held(&txq->lock);
294 
295 	/*
296 	 * explicitly wake up the NIC if:
297 	 * 1. shadow registers aren't enabled
	 * 2. it is not the command queue (the NIC is woken up for commands,
	 *    regardless of shadow registers, outside this function)
299 	 * 3. there is a chance that the NIC is asleep
300 	 */
301 	if (!trans->cfg->base_params->shadow_reg_enable &&
302 	    txq_id != trans_pcie->cmd_queue &&
303 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
304 		/*
305 		 * wake up nic if it's powered down ...
306 		 * uCode will wake up, and interrupt us again, so next
307 		 * time we'll skip this part.
308 		 */
309 		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
310 
311 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
312 			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
313 				       txq_id, reg);
314 			iwl_set_bit(trans, CSR_GP_CNTRL,
315 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
316 			txq->need_update = true;
317 			return;
318 		}
319 	}
320 
321 	/*
322 	 * if not in power-save mode, uCode will never sleep when we're
323 	 * trying to tx (during RFKILL, we're not trying to tx).
324 	 */
325 	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
326 	if (!txq->block)
327 		iwl_write32(trans, HBUS_TARG_WRPTR,
328 			    txq->q.write_ptr | (txq_id << 8));
329 }
330 
331 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
332 {
333 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
334 	int i;
335 
336 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
337 		struct iwl_txq *txq = &trans_pcie->txq[i];
338 
339 		spin_lock_bh(&txq->lock);
340 		if (trans_pcie->txq[i].need_update) {
341 			iwl_pcie_txq_inc_wr_ptr(trans, txq);
342 			trans_pcie->txq[i].need_update = false;
343 		}
344 		spin_unlock_bh(&txq->lock);
345 	}
346 }
347 
348 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
349 {
350 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
351 
352 	dma_addr_t addr = get_unaligned_le32(&tb->lo);
353 	if (sizeof(dma_addr_t) > sizeof(u32))
354 		addr |=
355 		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
356 
357 	return addr;
358 }
359 
360 static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
361 				       dma_addr_t addr, u16 len)
362 {
363 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
364 	u16 hi_n_len = len << 4;
365 
366 	put_unaligned_le32(addr, &tb->lo);
367 	if (sizeof(dma_addr_t) > sizeof(u32))
368 		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
369 
370 	tb->hi_n_len = cpu_to_le16(hi_n_len);
371 
372 	tfd->num_tbs = idx + 1;
373 }
374 
375 static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
376 {
377 	return tfd->num_tbs & 0x1f;
378 }
379 
380 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
381 			       struct iwl_cmd_meta *meta,
382 			       struct iwl_tfd *tfd)
383 {
384 	int i;
385 	int num_tbs;
386 
387 	/* Sanity check on number of chunks */
388 	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
389 
390 	if (num_tbs >= IWL_NUM_OF_TBS) {
391 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is quite a serious situation */
393 		return;
394 	}
395 
396 	/* first TB is never freed - it's the scratchbuf data */
397 
398 	for (i = 1; i < num_tbs; i++) {
399 		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
400 			dma_unmap_page(trans->dev,
401 				       iwl_pcie_tfd_tb_get_addr(tfd, i),
402 				       iwl_pcie_tfd_tb_get_len(tfd, i),
403 				       DMA_TO_DEVICE);
404 		else
405 			dma_unmap_single(trans->dev,
406 					 iwl_pcie_tfd_tb_get_addr(tfd, i),
407 					 iwl_pcie_tfd_tb_get_len(tfd, i),
408 					 DMA_TO_DEVICE);
409 	}
410 	tfd->num_tbs = 0;
411 }
412 
413 /*
414  * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
415  * @trans - transport private data
416  * @txq - tx queue
417  * @dma_dir - the direction of the DMA mapping
418  *
419  * Does NOT advance any TFD circular buffer read/write indexes
420  * Does NOT free the TFD itself (which is within circular buffer)
421  */
422 static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
423 {
424 	struct iwl_tfd *tfd_tmp = txq->tfds;
425 
426 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
427 	 * idx is bounded by n_window
428 	 */
429 	int rd_ptr = txq->q.read_ptr;
430 	int idx = get_cmd_index(&txq->q, rd_ptr);
431 
432 	lockdep_assert_held(&txq->lock);
433 
434 	/* We have only q->n_window txq->entries, but we use
435 	 * TFD_QUEUE_SIZE_MAX tfds
436 	 */
437 	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
438 
439 	/* free SKB */
440 	if (txq->entries) {
441 		struct sk_buff *skb;
442 
443 		skb = txq->entries[idx].skb;
444 
445 		/* Can be called from irqs-disabled context
446 		 * If skb is not NULL, it means that the whole queue is being
447 		 * freed and that the queue is not empty - free the skb
448 		 */
449 		if (skb) {
450 			iwl_op_mode_free_skb(trans->op_mode, skb);
451 			txq->entries[idx].skb = NULL;
452 		}
453 	}
454 }
455 
456 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
457 				  dma_addr_t addr, u16 len, bool reset)
458 {
459 	struct iwl_queue *q;
460 	struct iwl_tfd *tfd, *tfd_tmp;
461 	u32 num_tbs;
462 
463 	q = &txq->q;
464 	tfd_tmp = txq->tfds;
465 	tfd = &tfd_tmp[q->write_ptr];
466 
467 	if (reset)
468 		memset(tfd, 0, sizeof(*tfd));
469 
470 	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
471 
	/* Each TFD can point to a maximum of 20 Tx buffers */
473 	if (num_tbs >= IWL_NUM_OF_TBS) {
474 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
475 			IWL_NUM_OF_TBS);
476 		return -EINVAL;
477 	}
478 
479 	if (WARN(addr & ~IWL_TX_DMA_MASK,
480 		 "Unaligned address = %llx\n", (unsigned long long)addr))
481 		return -EINVAL;
482 
483 	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
484 
485 	return num_tbs;
486 }
487 
488 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
489 			       struct iwl_txq *txq, int slots_num,
490 			       u32 txq_id)
491 {
492 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
493 	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
494 	size_t scratchbuf_sz;
495 	int i;
496 
497 	if (WARN_ON(txq->entries || txq->tfds))
498 		return -EINVAL;
499 
500 	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
501 		    (unsigned long)txq);
502 	txq->trans_pcie = trans_pcie;
503 
504 	txq->q.n_window = slots_num;
505 
506 	txq->entries = kcalloc(slots_num,
507 			       sizeof(struct iwl_pcie_txq_entry),
508 			       GFP_KERNEL);
509 
510 	if (!txq->entries)
511 		goto error;
512 
513 	if (txq_id == trans_pcie->cmd_queue)
514 		for (i = 0; i < slots_num; i++) {
515 			txq->entries[i].cmd =
516 				kmalloc(sizeof(struct iwl_device_cmd),
517 					GFP_KERNEL);
518 			if (!txq->entries[i].cmd)
519 				goto error;
520 		}
521 
522 	/* Circular buffer of transmit frame descriptors (TFDs),
523 	 * shared with device */
524 	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
525 				       &txq->q.dma_addr, GFP_KERNEL);
526 	if (!txq->tfds)
527 		goto error;
528 
529 	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
530 	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
531 			sizeof(struct iwl_cmd_header) +
532 			offsetof(struct iwl_tx_cmd, scratch));
533 
534 	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
535 
536 	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
537 					      &txq->scratchbufs_dma,
538 					      GFP_KERNEL);
539 	if (!txq->scratchbufs)
540 		goto err_free_tfds;
541 
542 	txq->q.id = txq_id;
543 
544 	return 0;
545 err_free_tfds:
546 	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
547 error:
548 	if (txq->entries && txq_id == trans_pcie->cmd_queue)
549 		for (i = 0; i < slots_num; i++)
550 			kfree(txq->entries[i].cmd);
551 	kfree(txq->entries);
552 	txq->entries = NULL;
553 
	return -ENOMEM;
}
557 
558 static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
559 			      int slots_num, u32 txq_id)
560 {
561 	int ret;
562 
563 	txq->need_update = false;
564 
565 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
566 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
567 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
568 
569 	/* Initialize queue's high/low-water marks, and head/tail indexes */
570 	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
571 	if (ret)
572 		return ret;
573 
574 	spin_lock_init(&txq->lock);
575 	__skb_queue_head_init(&txq->overflow_q);
576 
577 	/*
578 	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
579 	 * given Tx queue, and enable the DMA channel used for that queue.
580 	 * Circular buffer (TFD queue in DRAM) physical base address */
581 	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
582 			   txq->q.dma_addr >> 8);
583 
584 	return 0;
585 }
586 
587 static void iwl_pcie_free_tso_page(struct sk_buff *skb)
588 {
589 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
590 
591 	if (info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]) {
592 		struct page *page =
593 			info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA];
594 
595 		__free_page(page);
596 		info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = NULL;
597 	}
598 }
599 
600 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
601 {
602 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
603 
604 	lockdep_assert_held(&trans_pcie->reg_lock);
605 
606 	if (trans_pcie->ref_cmd_in_flight) {
607 		trans_pcie->ref_cmd_in_flight = false;
608 		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
609 		iwl_trans_unref(trans);
610 	}
611 
612 	if (!trans->cfg->base_params->apmg_wake_up_wa)
613 		return;
614 	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
615 		return;
616 
617 	trans_pcie->cmd_hold_nic_awake = false;
618 	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
619 				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
620 }
621 
622 /*
623  * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
624  */
625 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
626 {
627 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
628 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
629 	struct iwl_queue *q = &txq->q;
630 
631 	spin_lock_bh(&txq->lock);
632 	while (q->write_ptr != q->read_ptr) {
633 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
634 				   txq_id, q->read_ptr);
635 
636 		if (txq_id != trans_pcie->cmd_queue) {
637 			struct sk_buff *skb = txq->entries[q->read_ptr].skb;
638 
639 			if (WARN_ON_ONCE(!skb))
640 				continue;
641 
642 			iwl_pcie_free_tso_page(skb);
643 		}
644 		iwl_pcie_txq_free_tfd(trans, txq);
645 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
646 
647 		if (q->read_ptr == q->write_ptr) {
648 			unsigned long flags;
649 
650 			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
651 			if (txq_id != trans_pcie->cmd_queue) {
652 				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
653 					      q->id);
654 				iwl_trans_unref(trans);
655 			} else {
656 				iwl_pcie_clear_cmd_in_flight(trans);
657 			}
658 			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
659 		}
660 	}
661 	txq->active = false;
662 
663 	while (!skb_queue_empty(&txq->overflow_q)) {
664 		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
665 
666 		iwl_op_mode_free_skb(trans->op_mode, skb);
667 	}
668 
669 	spin_unlock_bh(&txq->lock);
670 
671 	/* just in case - this queue may have been stopped */
672 	iwl_wake_queue(trans, txq);
673 }
674 
675 /*
676  * iwl_pcie_txq_free - Deallocate DMA queue.
677  * @txq: Transmit queue to deallocate.
678  *
679  * Empty queue by removing and destroying all BD's.
680  * Free all buffers.
681  * 0-fill, but do not free "txq" descriptor structure.
682  */
683 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
684 {
685 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
686 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
687 	struct device *dev = trans->dev;
688 	int i;
689 
690 	if (WARN_ON(!txq))
691 		return;
692 
693 	iwl_pcie_txq_unmap(trans, txq_id);
694 
695 	/* De-alloc array of command/tx buffers */
696 	if (txq_id == trans_pcie->cmd_queue)
697 		for (i = 0; i < txq->q.n_window; i++) {
698 			kzfree(txq->entries[i].cmd);
699 			kzfree(txq->entries[i].free_buf);
700 		}
701 
702 	/* De-alloc circular buffer of TFDs */
703 	if (txq->tfds) {
704 		dma_free_coherent(dev,
705 				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
706 				  txq->tfds, txq->q.dma_addr);
707 		txq->q.dma_addr = 0;
708 		txq->tfds = NULL;
709 
710 		dma_free_coherent(dev,
711 				  sizeof(*txq->scratchbufs) * txq->q.n_window,
712 				  txq->scratchbufs, txq->scratchbufs_dma);
713 	}
714 
715 	kfree(txq->entries);
716 	txq->entries = NULL;
717 
718 	del_timer_sync(&txq->stuck_timer);
719 
720 	/* 0-fill queue descriptor structure */
721 	memset(txq, 0, sizeof(*txq));
722 }
723 
724 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
725 {
726 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
727 	int nq = trans->cfg->base_params->num_of_queues;
728 	int chan;
729 	u32 reg_val;
730 	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
731 				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
732 
	/* make sure all queues are not stopped/used */
734 	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
735 	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
736 
737 	trans_pcie->scd_base_addr =
738 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
739 
740 	WARN_ON(scd_base_addr != 0 &&
741 		scd_base_addr != trans_pcie->scd_base_addr);
742 
743 	/* reset context data, TX status and translation data */
744 	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
745 				   SCD_CONTEXT_MEM_LOWER_BOUND,
746 			    NULL, clear_dwords);
747 
748 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
749 		       trans_pcie->scd_bc_tbls.dma >> 10);
750 
751 	/* The chain extension of the SCD doesn't work well. This feature is
752 	 * enabled by default by the HW, so we need to disable it manually.
753 	 */
754 	if (trans->cfg->base_params->scd_chain_ext_wa)
755 		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
756 
757 	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
758 				trans_pcie->cmd_fifo,
759 				trans_pcie->cmd_q_wdg_timeout);
760 
761 	/* Activate all Tx DMA/FIFO channels */
762 	iwl_scd_activate_fifos(trans);
763 
764 	/* Enable DMA channel */
765 	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
766 		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
767 				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
768 				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
769 
770 	/* Update FH chicken bits */
771 	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
772 	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
773 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
774 
775 	/* Enable L1-Active */
776 	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
777 		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
778 				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
779 }
780 
781 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
782 {
783 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
784 	int txq_id;
785 
786 	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
787 	     txq_id++) {
788 		struct iwl_txq *txq = &trans_pcie->txq[txq_id];
789 
790 		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
791 				   txq->q.dma_addr >> 8);
792 		iwl_pcie_txq_unmap(trans, txq_id);
793 		txq->q.read_ptr = 0;
794 		txq->q.write_ptr = 0;
795 	}
796 
797 	/* Tell NIC where to find the "keep warm" buffer */
798 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
799 			   trans_pcie->kw.dma >> 4);
800 
801 	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
803 	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
804 	 * contain garbage.
805 	 */
806 	iwl_pcie_tx_start(trans, 0);
807 }
808 
809 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
810 {
811 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
812 	unsigned long flags;
813 	int ch, ret;
814 	u32 mask = 0;
815 
816 	spin_lock(&trans_pcie->irq_lock);
817 
818 	if (!iwl_trans_grab_nic_access(trans, &flags))
819 		goto out;
820 
821 	/* Stop each Tx DMA channel */
822 	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
823 		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
824 		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
825 	}
826 
827 	/* Wait for DMA channels to be idle */
828 	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
829 	if (ret < 0)
830 		IWL_ERR(trans,
831 			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
832 			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
833 
834 	iwl_trans_release_nic_access(trans, &flags);
835 
836 out:
837 	spin_unlock(&trans_pcie->irq_lock);
838 }
839 
840 /*
841  * iwl_pcie_tx_stop - Stop all Tx DMA channels
842  */
843 int iwl_pcie_tx_stop(struct iwl_trans *trans)
844 {
845 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
846 	int txq_id;
847 
848 	/* Turn off all Tx DMA fifos */
849 	iwl_scd_deactivate_fifos(trans);
850 
851 	/* Turn off all Tx DMA channels */
852 	iwl_pcie_tx_stop_fh(trans);
853 
854 	/*
855 	 * This function can be called before the op_mode disabled the
856 	 * queues. This happens when we have an rfkill interrupt.
857 	 * Since we stop Tx altogether - mark the queues as stopped.
858 	 */
859 	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
860 	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
861 
862 	/* This can happen: start_hw, stop_device */
863 	if (!trans_pcie->txq)
864 		return 0;
865 
866 	/* Unmap DMA from host system and free skb's */
867 	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
868 	     txq_id++)
869 		iwl_pcie_txq_unmap(trans, txq_id);
870 
871 	return 0;
872 }
873 
874 /*
875  * iwl_trans_tx_free - Free TXQ Context
876  *
877  * Destroy all TX DMA queues and structures
878  */
879 void iwl_pcie_tx_free(struct iwl_trans *trans)
880 {
881 	int txq_id;
882 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
883 
884 	/* Tx queues */
885 	if (trans_pcie->txq) {
886 		for (txq_id = 0;
887 		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
888 			iwl_pcie_txq_free(trans, txq_id);
889 	}
890 
891 	kfree(trans_pcie->txq);
892 	trans_pcie->txq = NULL;
893 
894 	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
895 
896 	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
897 }
898 
899 /*
900  * iwl_pcie_tx_alloc - allocate TX context
901  * Allocate all Tx DMA structures and initialize them
902  */
903 static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
904 {
905 	int ret;
906 	int txq_id, slots_num;
907 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
908 
909 	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
910 			sizeof(struct iwlagn_scd_bc_tbl);
911 
	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
914 	if (WARN_ON(trans_pcie->txq)) {
915 		ret = -EINVAL;
916 		goto error;
917 	}
918 
919 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
920 				   scd_bc_tbls_size);
921 	if (ret) {
922 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
923 		goto error;
924 	}
925 
926 	/* Alloc keep-warm buffer */
927 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
928 	if (ret) {
929 		IWL_ERR(trans, "Keep Warm allocation failed\n");
930 		goto error;
931 	}
932 
933 	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
934 				  sizeof(struct iwl_txq), GFP_KERNEL);
935 	if (!trans_pcie->txq) {
936 		IWL_ERR(trans, "Not enough memory for txq\n");
937 		ret = -ENOMEM;
938 		goto error;
939 	}
940 
941 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
942 	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
943 	     txq_id++) {
944 		slots_num = (txq_id == trans_pcie->cmd_queue) ?
945 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
946 		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
947 					  slots_num, txq_id);
948 		if (ret) {
949 			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
950 			goto error;
951 		}
952 	}
953 
954 	return 0;
955 
956 error:
957 	iwl_pcie_tx_free(trans);
958 
959 	return ret;
960 }

int iwl_pcie_tx_init(struct iwl_trans *trans)
962 {
963 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
964 	int ret;
965 	int txq_id, slots_num;
966 	bool alloc = false;
967 
968 	if (!trans_pcie->txq) {
969 		ret = iwl_pcie_tx_alloc(trans);
970 		if (ret)
971 			goto error;
972 		alloc = true;
973 	}
974 
975 	spin_lock(&trans_pcie->irq_lock);
976 
977 	/* Turn off all Tx DMA fifos */
978 	iwl_scd_deactivate_fifos(trans);
979 
980 	/* Tell NIC where to find the "keep warm" buffer */
981 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
982 			   trans_pcie->kw.dma >> 4);
983 
984 	spin_unlock(&trans_pcie->irq_lock);
985 
986 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
987 	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
988 	     txq_id++) {
989 		slots_num = (txq_id == trans_pcie->cmd_queue) ?
990 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
991 		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
992 					 slots_num, txq_id);
993 		if (ret) {
994 			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
995 			goto error;
996 		}
997 	}
998 
999 	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1000 	if (trans->cfg->base_params->num_of_queues > 20)
1001 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
1002 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
1003 
1004 	return 0;
1005 error:
	/* Upon error, free only if we allocated something */
1007 	if (alloc)
1008 		iwl_pcie_tx_free(trans);
1009 	return ret;
1010 }
1011 
1012 static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
1013 {
1014 	lockdep_assert_held(&txq->lock);
1015 
1016 	if (!txq->wd_timeout)
1017 		return;
1018 
1019 	/*
1020 	 * station is asleep and we send data - that must
1021 	 * be uAPSD or PS-Poll. Don't rearm the timer.
1022 	 */
1023 	if (txq->frozen)
1024 		return;
1025 
1026 	/*
1027 	 * if empty delete timer, otherwise move timer forward
1028 	 * since we're making progress on this queue
1029 	 */
1030 	if (txq->q.read_ptr == txq->q.write_ptr)
1031 		del_timer(&txq->stuck_timer);
1032 	else
1033 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1034 }
1035 
1036 /* Frees buffers until index _not_ inclusive */
1037 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1038 			    struct sk_buff_head *skbs)
1039 {
1040 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1041 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1042 	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
1043 	struct iwl_queue *q = &txq->q;
1044 	int last_to_free;
1045 
	/* This function is not meant to release cmd queue */
1047 	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
1048 		return;
1049 
1050 	spin_lock_bh(&txq->lock);
1051 
1052 	if (!txq->active) {
1053 		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1054 				    txq_id, ssn);
1055 		goto out;
1056 	}
1057 
1058 	if (txq->q.read_ptr == tfd_num)
1059 		goto out;
1060 
1061 	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1062 			   txq_id, txq->q.read_ptr, tfd_num, ssn);
1063 
	/* Since we free until the index _not_ inclusive, the one before that
	 * index is the last we will free. This one must be used */
1066 	last_to_free = iwl_queue_dec_wrap(tfd_num);
1067 
1068 	if (!iwl_queue_used(q, last_to_free)) {
1069 		IWL_ERR(trans,
1070 			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1071 			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
1072 			q->write_ptr, q->read_ptr);
1073 		goto out;
1074 	}
1075 
1076 	if (WARN_ON(!skb_queue_empty(skbs)))
1077 		goto out;
1078 
1079 	for (;
1080 	     q->read_ptr != tfd_num;
1081 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
1082 		struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
1083 
1084 		if (WARN_ON_ONCE(!skb))
1085 			continue;
1086 
1087 		iwl_pcie_free_tso_page(skb);
1088 
1089 		__skb_queue_tail(skbs, skb);
1090 
1091 		txq->entries[txq->q.read_ptr].skb = NULL;
1092 
1093 		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
1094 
1095 		iwl_pcie_txq_free_tfd(trans, txq);
1096 	}
1097 
1098 	iwl_pcie_txq_progress(txq);
1099 
1100 	if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
1101 	    test_bit(txq_id, trans_pcie->queue_stopped)) {
1102 		struct sk_buff_head overflow_skbs;
1103 
1104 		__skb_queue_head_init(&overflow_skbs);
1105 		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1106 
1107 		/*
		 * This is tricky: we are in the reclaim path, which is not
		 * re-entrant, so no one else will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx either.
		 * Bottom line: we can unlock and re-lock later.
1113 		 */
1114 		spin_unlock_bh(&txq->lock);
1115 
1116 		while (!skb_queue_empty(&overflow_skbs)) {
1117 			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1118 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1119 			u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
1120 			struct iwl_device_cmd *dev_cmd =
1121 				info->driver_data[dev_cmd_idx];
1122 
1123 			/*
1124 			 * Note that we can very well be overflowing again.
1125 			 * In that case, iwl_queue_space will be small again
1126 			 * and we won't wake mac80211's queue.
1127 			 */
1128 			iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
1129 		}
1130 		spin_lock_bh(&txq->lock);
1131 
1132 		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1133 			iwl_wake_queue(trans, txq);
1134 	}
1135 
1136 	if (q->read_ptr == q->write_ptr) {
1137 		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
1138 		iwl_trans_unref(trans);
1139 	}
1140 
1141 out:
1142 	spin_unlock_bh(&txq->lock);
1143 }
1144 
1145 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1146 				      const struct iwl_host_cmd *cmd)
1147 {
1148 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1149 	int ret;
1150 
1151 	lockdep_assert_held(&trans_pcie->reg_lock);
1152 
1153 	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
1154 	    !trans_pcie->ref_cmd_in_flight) {
1155 		trans_pcie->ref_cmd_in_flight = true;
1156 		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
1157 		iwl_trans_ref(trans);
1158 	}
1159 
1160 	/*
1161 	 * wake up the NIC to make sure that the firmware will see the host
1162 	 * command - we will let the NIC sleep once all the host commands
1163 	 * returned. This needs to be done only on NICs that have
1164 	 * apmg_wake_up_wa set.
1165 	 */
1166 	if (trans->cfg->base_params->apmg_wake_up_wa &&
1167 	    !trans_pcie->cmd_hold_nic_awake) {
1168 		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1169 					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1170 
1171 		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1172 				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1173 				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1174 				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
1175 				   15000);
1176 		if (ret < 0) {
1177 			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1178 					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1179 			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
1180 			return -EIO;
1181 		}
1182 		trans_pcie->cmd_hold_nic_awake = true;
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 /*
1189  * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1190  *
1191  * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
1193  * enough free space (> low mark), wake the stack that feeds us.
1194  */
1195 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1196 {
1197 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1198 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1199 	struct iwl_queue *q = &txq->q;
1200 	unsigned long flags;
1201 	int nfreed = 0;
1202 
1203 	lockdep_assert_held(&txq->lock);
1204 
1205 	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
1206 		IWL_ERR(trans,
1207 			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1208 			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
1209 			q->write_ptr, q->read_ptr);
1210 		return;
1211 	}
1212 
1213 	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
1214 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
1215 
1216 		if (nfreed++ > 0) {
1217 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1218 				idx, q->write_ptr, q->read_ptr);
1219 			iwl_force_nmi(trans);
1220 		}
1221 	}
1222 
1223 	if (q->read_ptr == q->write_ptr) {
1224 		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1225 		iwl_pcie_clear_cmd_in_flight(trans);
1226 		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1227 	}
1228 
1229 	iwl_pcie_txq_progress(txq);
1230 }
1231 
1232 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
1233 				 u16 txq_id)
1234 {
1235 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1236 	u32 tbl_dw_addr;
1237 	u32 tbl_dw;
1238 	u16 scd_q2ratid;
1239 
1240 	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1241 
1242 	tbl_dw_addr = trans_pcie->scd_base_addr +
1243 			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1244 
1245 	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
1246 
1247 	if (txq_id & 0x1)
1248 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1249 	else
1250 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1251 
1252 	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
1253 
1254 	return 0;
1255 }
1256 
1257 /* Receiver address (actually, Rx station's index into station table),
1258  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1259 #define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
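/* e.g. (illustrative) sta_id 2, tid 5 gives ra_tid 0x25 */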
1260 
1261 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1262 			       const struct iwl_trans_txq_scd_cfg *cfg,
1263 			       unsigned int wdg_timeout)
1264 {
1265 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1266 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1267 	int fifo = -1;
1268 
1269 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
1270 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1271 
1272 	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1273 
1274 	if (cfg) {
1275 		fifo = cfg->fifo;
1276 
		/* Disable the scheduler prior to configuring the cmd queue */
1278 		if (txq_id == trans_pcie->cmd_queue &&
1279 		    trans_pcie->scd_set_active)
1280 			iwl_scd_enable_set_active(trans, 0);
1281 
1282 		/* Stop this Tx queue before configuring it */
1283 		iwl_scd_txq_set_inactive(trans, txq_id);
1284 
1285 		/* Set this queue as a chain-building queue unless it is CMD */
1286 		if (txq_id != trans_pcie->cmd_queue)
1287 			iwl_scd_txq_set_chain(trans, txq_id);
1288 
1289 		if (cfg->aggregate) {
1290 			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
1291 
1292 			/* Map receiver-address / traffic-ID to this queue */
1293 			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1294 
1295 			/* enable aggregations for the queue */
1296 			iwl_scd_txq_enable_agg(trans, txq_id);
1297 			txq->ampdu = true;
1298 		} else {
1299 			/*
1300 			 * disable aggregations for the queue, this will also
1301 			 * make the ra_tid mapping configuration irrelevant
1302 			 * since it is now a non-AGG queue.
1303 			 */
1304 			iwl_scd_txq_disable_agg(trans, txq_id);
1305 
1306 			ssn = txq->q.read_ptr;
1307 		}
1308 	}
1309 
1310 	/* Place first TFD at index corresponding to start sequence number.
1311 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
1312 	txq->q.read_ptr = (ssn & 0xff);
1313 	txq->q.write_ptr = (ssn & 0xff);
1314 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1315 			   (ssn & 0xff) | (txq_id << 8));
1316 
1317 	if (cfg) {
1318 		u8 frame_limit = cfg->frame_limit;
1319 
1320 		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1321 
1322 		/* Set up Tx window size and frame limit for this queue */
1323 		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
1324 				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
1325 		iwl_trans_write_mem32(trans,
1326 			trans_pcie->scd_base_addr +
1327 			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1328 			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1329 					SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1330 			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1331 					SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1332 
1333 		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
1334 		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
1335 			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1336 			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
1337 			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
1338 			       SCD_QUEUE_STTS_REG_MSK);
1339 
1340 		/* enable the scheduler for this queue (only) */
1341 		if (txq_id == trans_pcie->cmd_queue &&
1342 		    trans_pcie->scd_set_active)
1343 			iwl_scd_enable_set_active(trans, BIT(txq_id));
1344 
1345 		IWL_DEBUG_TX_QUEUES(trans,
1346 				    "Activate queue %d on FIFO %d WrPtr: %d\n",
1347 				    txq_id, fifo, ssn & 0xff);
1348 	} else {
1349 		IWL_DEBUG_TX_QUEUES(trans,
1350 				    "Activate queue %d WrPtr: %d\n",
1351 				    txq_id, ssn & 0xff);
1352 	}
1353 
1354 	txq->active = true;
1355 }
1356 
1357 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1358 				bool configure_scd)
1359 {
1360 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1361 	u32 stts_addr = trans_pcie->scd_base_addr +
1362 			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1363 	static const u32 zero_val[4] = {};
1364 
1365 	trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
1366 	trans_pcie->txq[txq_id].frozen = false;
1367 
1368 	/*
1369 	 * Upon HW Rfkill - we stop the device, and then stop the queues
1370 	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1371 	 * allow the op_mode to call txq_disable after it already called
1372 	 * stop_device.
1373 	 */
1374 	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
1375 		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1376 			  "queue %d not used", txq_id);
1377 		return;
1378 	}
1379 
1380 	if (configure_scd) {
1381 		iwl_scd_txq_set_inactive(trans, txq_id);
1382 
1383 		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
1384 				    ARRAY_SIZE(zero_val));
1385 	}
1386 
1387 	iwl_pcie_txq_unmap(trans, txq_id);
1388 	trans_pcie->txq[txq_id].ampdu = false;
1389 
1390 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1391 }
1392 
1393 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
1394 
1395 /*
1396  * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
1398  * @cmd: a pointer to the ucode command structure
1399  *
1400  * The function returns < 0 values to indicate the operation
1401  * failed. On success, it returns the index (>= 0) of command in the
1402  * command queue.
1403  */
1404 static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1405 				 struct iwl_host_cmd *cmd)
1406 {
1407 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1408 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1409 	struct iwl_queue *q = &txq->q;
1410 	struct iwl_device_cmd *out_cmd;
1411 	struct iwl_cmd_meta *out_meta;
1412 	unsigned long flags;
1413 	void *dup_buf = NULL;
1414 	dma_addr_t phys_addr;
1415 	int idx;
1416 	u16 copy_size, cmd_size, scratch_size;
1417 	bool had_nocopy = false;
1418 	u8 group_id = iwl_cmd_groupid(cmd->id);
1419 	int i, ret;
1420 	u32 cmd_pos;
1421 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1422 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1423 
1424 	if (WARN(!trans_pcie->wide_cmd_header &&
1425 		 group_id > IWL_ALWAYS_LONG_GROUP,
1426 		 "unsupported wide command %#x\n", cmd->id))
1427 		return -EINVAL;
1428 
1429 	if (group_id != 0) {
1430 		copy_size = sizeof(struct iwl_cmd_header_wide);
1431 		cmd_size = sizeof(struct iwl_cmd_header_wide);
1432 	} else {
1433 		copy_size = sizeof(struct iwl_cmd_header);
1434 		cmd_size = sizeof(struct iwl_cmd_header);
1435 	}
1436 
1437 	/* need one for the header if the first is NOCOPY */
1438 	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1439 
1440 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1441 		cmddata[i] = cmd->data[i];
1442 		cmdlen[i] = cmd->len[i];
1443 
1444 		if (!cmd->len[i])
1445 			continue;
1446 
1447 		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1448 		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1449 			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1450 
1451 			if (copy > cmdlen[i])
1452 				copy = cmdlen[i];
1453 			cmdlen[i] -= copy;
1454 			cmddata[i] += copy;
1455 			copy_size += copy;
1456 		}
1457 
1458 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1459 			had_nocopy = true;
1460 			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1461 				idx = -EINVAL;
1462 				goto free_dup_buf;
1463 			}
1464 		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1465 			/*
1466 			 * This is also a chunk that isn't copied
1467 			 * to the static buffer so set had_nocopy.
1468 			 */
1469 			had_nocopy = true;
1470 
1471 			/* only allowed once */
1472 			if (WARN_ON(dup_buf)) {
1473 				idx = -EINVAL;
1474 				goto free_dup_buf;
1475 			}
1476 
1477 			dup_buf = kmemdup(cmddata[i], cmdlen[i],
1478 					  GFP_ATOMIC);
1479 			if (!dup_buf)
1480 				return -ENOMEM;
1481 		} else {
1482 			/* NOCOPY must not be followed by normal! */
1483 			if (WARN_ON(had_nocopy)) {
1484 				idx = -EINVAL;
1485 				goto free_dup_buf;
1486 			}
1487 			copy_size += cmdlen[i];
1488 		}
1489 		cmd_size += cmd->len[i];
1490 	}
1491 
1492 	/*
1493 	 * If any of the command structures end up being larger than
1494 	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
1495 	 * allocated into separate TFDs, then we will need to
1496 	 * increase the size of the buffers.
1497 	 */
1498 	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
1499 		 "Command %s (%#x) is too large (%d bytes)\n",
1500 		 iwl_get_cmd_string(trans, cmd->id),
1501 		 cmd->id, copy_size)) {
1502 		idx = -EINVAL;
1503 		goto free_dup_buf;
1504 	}
1505 
1506 	spin_lock_bh(&txq->lock);
1507 
1508 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1509 		spin_unlock_bh(&txq->lock);
1510 
1511 		IWL_ERR(trans, "No space in command queue\n");
1512 		iwl_op_mode_cmd_queue_full(trans->op_mode);
1513 		idx = -ENOSPC;
1514 		goto free_dup_buf;
1515 	}
1516 
1517 	idx = get_cmd_index(q, q->write_ptr);
1518 	out_cmd = txq->entries[idx].cmd;
1519 	out_meta = &txq->entries[idx].meta;
1520 
1521 	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
1522 	if (cmd->flags & CMD_WANT_SKB)
1523 		out_meta->source = cmd;
1524 
1525 	/* set up the header */
1526 	if (group_id != 0) {
1527 		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1528 		out_cmd->hdr_wide.group_id = group_id;
1529 		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1530 		out_cmd->hdr_wide.length =
1531 			cpu_to_le16(cmd_size -
1532 				    sizeof(struct iwl_cmd_header_wide));
1533 		out_cmd->hdr_wide.reserved = 0;
1534 		out_cmd->hdr_wide.sequence =
1535 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
1536 						 INDEX_TO_SEQ(q->write_ptr));
1537 
1538 		cmd_pos = sizeof(struct iwl_cmd_header_wide);
1539 		copy_size = sizeof(struct iwl_cmd_header_wide);
1540 	} else {
1541 		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1542 		out_cmd->hdr.sequence =
1543 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
1544 						 INDEX_TO_SEQ(q->write_ptr));
1545 		out_cmd->hdr.group_id = 0;
1546 
1547 		cmd_pos = sizeof(struct iwl_cmd_header);
1548 		copy_size = sizeof(struct iwl_cmd_header);
1549 	}
1550 
1551 	/* and copy the data that needs to be copied */
1552 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1553 		int copy;
1554 
1555 		if (!cmd->len[i])
1556 			continue;
1557 
1558 		/* copy everything if not nocopy/dup */
1559 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1560 					   IWL_HCMD_DFL_DUP))) {
1561 			copy = cmd->len[i];
1562 
1563 			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1564 			cmd_pos += copy;
1565 			copy_size += copy;
1566 			continue;
1567 		}
1568 
1569 		/*
1570 		 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
1571 		 * in total (for the scratchbuf handling), but copy up to what
1572 		 * we can fit into the payload for debug dump purposes.
1573 		 */
1574 		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1575 
1576 		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1577 		cmd_pos += copy;
1578 
1579 		/* However, treat copy_size the proper way, we need it below */
1580 		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1581 			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1582 
1583 			if (copy > cmd->len[i])
1584 				copy = cmd->len[i];
1585 			copy_size += copy;
1586 		}
1587 	}
1588 
1589 	IWL_DEBUG_HC(trans,
1590 		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1591 		     iwl_get_cmd_string(trans, cmd->id),
1592 		     group_id, out_cmd->hdr.cmd,
1593 		     le16_to_cpu(out_cmd->hdr.sequence),
1594 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
1595 
1596 	/* start the TFD with the scratchbuf */
1597 	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
1598 	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
1599 	iwl_pcie_txq_build_tfd(trans, txq,
1600 			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
1601 			       scratch_size, true);
1602 
1603 	/* map first command fragment, if any remains */
1604 	if (copy_size > scratch_size) {
1605 		phys_addr = dma_map_single(trans->dev,
1606 					   ((u8 *)&out_cmd->hdr) + scratch_size,
1607 					   copy_size - scratch_size,
1608 					   DMA_TO_DEVICE);
1609 		if (dma_mapping_error(trans->dev, phys_addr)) {
1610 			iwl_pcie_tfd_unmap(trans, out_meta,
1611 					   &txq->tfds[q->write_ptr]);
1612 			idx = -ENOMEM;
1613 			goto out;
1614 		}
1615 
1616 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1617 				       copy_size - scratch_size, false);
1618 	}
1619 
1620 	/* map the remaining (adjusted) nocopy/dup fragments */
1621 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1622 		const void *data = cmddata[i];
1623 
1624 		if (!cmdlen[i])
1625 			continue;
1626 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1627 					   IWL_HCMD_DFL_DUP)))
1628 			continue;
1629 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1630 			data = dup_buf;
1631 		phys_addr = dma_map_single(trans->dev, (void *)data,
1632 					   cmdlen[i], DMA_TO_DEVICE);
1633 		if (dma_mapping_error(trans->dev, phys_addr)) {
1634 			iwl_pcie_tfd_unmap(trans, out_meta,
1635 					   &txq->tfds[q->write_ptr]);
1636 			idx = -ENOMEM;
1637 			goto out;
1638 		}
1639 
1640 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1641 	}
1642 
1643 	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
1644 		     sizeof(out_meta->flags) * BITS_PER_BYTE);
1645 	out_meta->flags = cmd->flags;
1646 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1647 		kzfree(txq->entries[idx].free_buf);
1648 	txq->entries[idx].free_buf = dup_buf;
1649 
1650 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1651 
1652 	/* start timer if queue currently empty */
1653 	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
1654 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1655 
1656 	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1657 	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1658 	if (ret < 0) {
1659 		idx = ret;
1660 		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1661 		goto out;
1662 	}
1663 
1664 	/* Increment and update queue's write index */
1665 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
1666 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
1667 
1668 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1669 
1670  out:
1671 	spin_unlock_bh(&txq->lock);
1672  free_dup_buf:
1673 	if (idx < 0)
1674 		kfree(dup_buf);
1675 	return idx;
1676 }
1677 
1678 /*
1679  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1680  * @rxb: Rx buffer to reclaim
1681  */
1682 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1683 			    struct iwl_rx_cmd_buffer *rxb)
1684 {
1685 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1686 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1687 	u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
1688 	u32 cmd_id;
1689 	int txq_id = SEQ_TO_QUEUE(sequence);
1690 	int index = SEQ_TO_INDEX(sequence);
1691 	int cmd_index;
1692 	struct iwl_device_cmd *cmd;
1693 	struct iwl_cmd_meta *meta;
1694 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1695 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1696 
	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
1700 	if (WARN(txq_id != trans_pcie->cmd_queue,
1701 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1702 		 txq_id, trans_pcie->cmd_queue, sequence,
1703 		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
1704 		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
1705 		iwl_print_hex_error(trans, pkt, 32);
1706 		return;
1707 	}
1708 
1709 	spin_lock_bh(&txq->lock);
1710 
1711 	cmd_index = get_cmd_index(&txq->q, index);
1712 	cmd = txq->entries[cmd_index].cmd;
1713 	meta = &txq->entries[cmd_index].meta;
1714 	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
1715 
1716 	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
1717 
1718 	/* Input error checking is done when commands are added to queue. */
1719 	if (meta->flags & CMD_WANT_SKB) {
1720 		struct page *p = rxb_steal_page(rxb);
1721 
1722 		meta->source->resp_pkt = pkt;
1723 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
1724 		meta->source->_rx_page_order = trans_pcie->rx_page_order;
1725 	}
1726 
1727 	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
1728 		iwl_op_mode_async_cb(trans->op_mode, cmd);
1729 
1730 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1731 
1732 	if (!(meta->flags & CMD_ASYNC)) {
1733 		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1734 			IWL_WARN(trans,
1735 				 "HCMD_ACTIVE already clear for command %s\n",
1736 				 iwl_get_cmd_string(trans, cmd_id));
1737 		}
1738 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1739 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1740 			       iwl_get_cmd_string(trans, cmd_id));
1741 		wake_up(&trans_pcie->wait_command_queue);
1742 	}
1743 
1744 	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
1745 		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
1746 			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
1747 		set_bit(STATUS_TRANS_IDLE, &trans->status);
1748 		wake_up(&trans_pcie->d0i3_waitq);
1749 	}
1750 
1751 	if (meta->flags & CMD_WAKE_UP_TRANS) {
1752 		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
1753 			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
1754 		clear_bit(STATUS_TRANS_IDLE, &trans->status);
1755 		wake_up(&trans_pcie->d0i3_waitq);
1756 	}
1757 
1758 	meta->flags = 0;
1759 
1760 	spin_unlock_bh(&txq->lock);
1761 }
1762 
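/* How long to wait for a synchronous host command to complete: 2 seconds */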
1763 #define HOST_COMPLETE_TIMEOUT	(2 * HZ)
1764 
1765 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1766 				    struct iwl_host_cmd *cmd)
1767 {
1768 	int ret;
1769 
	/* An asynchronous command cannot expect an SKB to be set. */
1771 	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1772 		return -EINVAL;
1773 
1774 	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
1775 	if (ret < 0) {
1776 		IWL_ERR(trans,
1777 			"Error sending %s: enqueue_hcmd failed: %d\n",
1778 			iwl_get_cmd_string(trans, cmd->id), ret);
1779 		return ret;
1780 	}
1781 	return 0;
1782 }
1783 
1784 static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1785 				   struct iwl_host_cmd *cmd)
1786 {
1787 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1788 	int cmd_idx;
1789 	int ret;
1790 
1791 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
1792 		       iwl_get_cmd_string(trans, cmd->id));
1793 
1794 	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1795 				  &trans->status),
1796 		 "Command %s: a command is already active!\n",
1797 		 iwl_get_cmd_string(trans, cmd->id)))
1798 		return -EIO;
1799 
1800 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
1801 		       iwl_get_cmd_string(trans, cmd->id));
1802 
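	/*
	 * If the device is runtime-suspended (D0i3), wait for it to resume
	 * before enqueueing the command.
	 */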
1803 	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
1804 		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
1805 				 pm_runtime_active(&trans_pcie->pci_dev->dev),
1806 				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
1807 		if (!ret) {
1808 			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
1809 			return -ETIMEDOUT;
1810 		}
1811 	}
1812 
1813 	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
1814 	if (cmd_idx < 0) {
1815 		ret = cmd_idx;
1816 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1817 		IWL_ERR(trans,
1818 			"Error sending %s: enqueue_hcmd failed: %d\n",
1819 			iwl_get_cmd_string(trans, cmd->id), ret);
1820 		return ret;
1821 	}
1822 
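	/*
	 * Wait until the completion handler clears STATUS_SYNC_HCMD_ACTIVE,
	 * or give up after HOST_COMPLETE_TIMEOUT.
	 */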
1823 	ret = wait_event_timeout(trans_pcie->wait_command_queue,
1824 				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1825 					   &trans->status),
1826 				 HOST_COMPLETE_TIMEOUT);
1827 	if (!ret) {
1828 		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1829 		struct iwl_queue *q = &txq->q;
1830 
1831 		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1832 			iwl_get_cmd_string(trans, cmd->id),
1833 			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1834 
1835 		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1836 			q->read_ptr, q->write_ptr);
1837 
1838 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1839 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1840 			       iwl_get_cmd_string(trans, cmd->id));
1841 		ret = -ETIMEDOUT;
1842 
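		/*
		 * The firmware didn't respond in time: force an NMI to get
		 * its state dumped and let the op_mode handle the error.
		 */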
1843 		iwl_force_nmi(trans);
1844 		iwl_trans_fw_error(trans);
1845 
1846 		goto cancel;
1847 	}
1848 
1849 	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1850 		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
1851 			iwl_get_cmd_string(trans, cmd->id));
1852 		dump_stack();
1853 		ret = -EIO;
1854 		goto cancel;
1855 	}
1856 
1857 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1858 	    test_bit(STATUS_RFKILL, &trans->status)) {
1859 		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1860 		ret = -ERFKILL;
1861 		goto cancel;
1862 	}
1863 
1864 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1865 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
1866 			iwl_get_cmd_string(trans, cmd->id));
1867 		ret = -EIO;
1868 		goto cancel;
1869 	}
1870 
1871 	return 0;
1872 
1873 cancel:
1874 	if (cmd->flags & CMD_WANT_SKB) {
1875 		/*
1876 		 * Cancel the CMD_WANT_SKB flag for the cmd in the
1877 		 * TX cmd queue. Otherwise in case the cmd comes
1878 		 * in later, it will possibly set an invalid
1879 		 * address (cmd->meta.source).
1880 		 */
1881 		trans_pcie->txq[trans_pcie->cmd_queue].
1882 			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1883 	}
1884 
1885 	if (cmd->resp_pkt) {
1886 		iwl_free_resp(cmd);
1887 		cmd->resp_pkt = NULL;
1888 	}
1889 
1890 	return ret;
1891 }
1892 
1893 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1894 {
1895 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1896 	    test_bit(STATUS_RFKILL, &trans->status)) {
1897 		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1898 				  cmd->id);
1899 		return -ERFKILL;
1900 	}
1901 
1902 	if (cmd->flags & CMD_ASYNC)
1903 		return iwl_pcie_send_hcmd_async(trans, cmd);
1904 
	/* We can still fail if RF-kill is asserted while we wait */
1906 	return iwl_pcie_send_hcmd_sync(trans, cmd);
1907 }
1908 
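/*
 * iwl_fill_data_tbs - map the skb data into TFD transfer buffers
 *
 * Maps whatever is left of the skb head after the 802.11 header, plus every
 * paged fragment, and adds each mapping as a TB of the TFD being built.
 */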
1909 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1910 			     struct iwl_txq *txq, u8 hdr_len,
1911 			     struct iwl_cmd_meta *out_meta,
1912 			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
1913 {
1914 	struct iwl_queue *q = &txq->q;
1915 	u16 tb2_len;
1916 	int i;
1917 
1918 	/*
1919 	 * Set up TFD's third entry to point directly to remainder
1920 	 * of skb's head, if any
1921 	 */
1922 	tb2_len = skb_headlen(skb) - hdr_len;
1923 
1924 	if (tb2_len > 0) {
1925 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
1926 						     skb->data + hdr_len,
1927 						     tb2_len, DMA_TO_DEVICE);
1928 		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
1929 			iwl_pcie_tfd_unmap(trans, out_meta,
1930 					   &txq->tfds[q->write_ptr]);
1931 			return -EINVAL;
1932 		}
1933 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
1934 	}
1935 
1936 	/* set up the remaining entries to point to the data */
1937 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1938 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1939 		dma_addr_t tb_phys;
1940 		int tb_idx;
1941 
1942 		if (!skb_frag_size(frag))
1943 			continue;
1944 
1945 		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1946 					   skb_frag_size(frag), DMA_TO_DEVICE);
1947 
1948 		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
1949 			iwl_pcie_tfd_unmap(trans, out_meta,
1950 					   &txq->tfds[q->write_ptr]);
1951 			return -EINVAL;
1952 		}
1953 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1954 						skb_frag_size(frag), false);
1955 
1956 		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
1957 	}
1958 
1959 	trace_iwlwifi_dev_tx(trans->dev, skb,
1960 			     &txq->tfds[txq->q.write_ptr],
1961 			     sizeof(struct iwl_tfd),
1962 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
1963 			     skb->data + hdr_len, tb2_len);
1964 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
1965 				  hdr_len, skb->len - hdr_len);
1966 	return 0;
1967 }
1968 
1969 #ifdef CONFIG_INET
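/*
 * Get the per-CPU page used to build A-MSDU subframe headers; allocate a
 * fresh page if the current one doesn't have 'len' bytes left.
 */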
1970 static struct iwl_tso_hdr_page *
1971 get_page_hdr(struct iwl_trans *trans, size_t len)
1972 {
1973 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1974 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
1975 
1976 	if (!p->page)
1977 		goto alloc;
1978 
1979 	/* enough room on this page */
1980 	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
1981 		return p;
1982 
1983 	/* We don't have enough room on this page, get a new one. */
1984 	__free_page(p->page);
1985 
1986 alloc:
1987 	p->page = alloc_page(GFP_ATOMIC);
1988 	if (!p->page)
1989 		return NULL;
1990 	p->pos = page_address(p->page);
1991 	return p;
1992 }
1993 
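/*
 * Seed tcph->check with the pseudo-header checksum so that the payload
 * checksum computed later can simply be folded into it (software TX
 * checksum path only).
 */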
1994 static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
1995 					bool ipv6, unsigned int len)
1996 {
1997 	if (ipv6) {
1998 		struct ipv6hdr *iphv6 = iph;
1999 
2000 		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
2001 					       len + tcph->doff * 4,
2002 					       IPPROTO_TCP, 0);
2003 	} else {
2004 		struct iphdr *iphv4 = iph;
2005 
2006 		ip_send_check(iphv4);
2007 		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
2008 						 len + tcph->doff * 4,
2009 						 IPPROTO_TCP, 0);
2010 	}
2011 }
2012 
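/*
 * iwl_fill_data_tbs_amsdu - build an A-MSDU from a TSO skb
 *
 * Splits the payload into MSS-sized subframes, builds an 802.3 + SNAP/IP/TCP
 * header for each subframe in the per-CPU header page, and maps the headers
 * and payload chunks as TBs of the TFD being built.
 */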
2013 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2014 				   struct iwl_txq *txq, u8 hdr_len,
2015 				   struct iwl_cmd_meta *out_meta,
2016 				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
2017 {
2018 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2019 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
2020 	struct ieee80211_hdr *hdr = (void *)skb->data;
2021 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
2022 	unsigned int mss = skb_shinfo(skb)->gso_size;
2023 	struct iwl_queue *q = &txq->q;
2024 	u16 length, iv_len, amsdu_pad;
2025 	u8 *start_hdr;
2026 	struct iwl_tso_hdr_page *hdr_page;
2027 	int ret;
2028 	struct tso_t tso;
2029 
2030 	/* if the packet is protected, then it must be CCMP or GCMP */
2031 	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
2032 	iv_len = ieee80211_has_protected(hdr->frame_control) ?
2033 		IEEE80211_CCMP_HDR_LEN : 0;
2034 
2035 	trace_iwlwifi_dev_tx(trans->dev, skb,
2036 			     &txq->tfds[txq->q.write_ptr],
2037 			     sizeof(struct iwl_tfd),
2038 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
2039 			     NULL, 0);
2040 
2041 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
2042 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
2043 	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
2044 	amsdu_pad = 0;
2045 
2046 	/* total amount of header we may need for this A-MSDU */
2047 	hdr_room = DIV_ROUND_UP(total_len, mss) *
2048 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
2049 
	/* Our device supports at most 9 segments, so all the headers fit in one page */
2051 	hdr_page = get_page_hdr(trans, hdr_room);
2052 	if (!hdr_page)
2053 		return -ENOMEM;
2054 
2055 	get_page(hdr_page->page);
2056 	start_hdr = hdr_page->pos;
2057 	info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = hdr_page->page;
2058 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
2059 	hdr_page->pos += iv_len;
2060 
	/*
	 * Pull the ieee80211 header + IV so that the TSO core can be used;
	 * they will be restored for the tx_status flow.
	 */
2065 	skb_pull(skb, hdr_len + iv_len);
2066 
2067 	tso_start(skb, &tso);
2068 
2069 	while (total_len) {
2070 		/* this is the data left for this subframe */
2071 		unsigned int data_left =
2072 			min_t(unsigned int, mss, total_len);
2073 		struct sk_buff *csum_skb = NULL;
2074 		unsigned int hdr_tb_len;
2075 		dma_addr_t hdr_tb_phys;
2076 		struct tcphdr *tcph;
2077 		u8 *iph;
2078 
2079 		total_len -= data_left;
2080 
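		/* A-MSDU subframe header: pad, DA, SA and subframe length */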
2081 		memset(hdr_page->pos, 0, amsdu_pad);
2082 		hdr_page->pos += amsdu_pad;
2083 		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
2084 				  data_left)) & 0x3;
2085 		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
2086 		hdr_page->pos += ETH_ALEN;
2087 		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
2088 		hdr_page->pos += ETH_ALEN;
2089 
2090 		length = snap_ip_tcp_hdrlen + data_left;
2091 		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
2092 		hdr_page->pos += sizeof(length);
2093 
		/*
		 * This also copies the SNAP header, which is considered
		 * part of the MAC header.
		 */
2098 		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
2099 		iph = hdr_page->pos + 8;
2100 		tcph = (void *)(iph + ip_hdrlen);
2101 
2102 		/* For testing on current hardware only */
2103 		if (trans_pcie->sw_csum_tx) {
2104 			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
2105 					     GFP_ATOMIC);
2106 			if (!csum_skb) {
2107 				ret = -ENOMEM;
2108 				goto out_unmap;
2109 			}
2110 
2111 			iwl_compute_pseudo_hdr_csum(iph, tcph,
2112 						    skb->protocol ==
2113 							htons(ETH_P_IPV6),
2114 						    data_left);
2115 
2116 			memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
2117 			       tcph, tcp_hdrlen(skb));
2118 			skb_set_transport_header(csum_skb, 0);
2119 			csum_skb->csum_start =
2120 				(unsigned char *)tcp_hdr(csum_skb) -
2121 						 csum_skb->head;
2122 		}
2123 
2124 		hdr_page->pos += snap_ip_tcp_hdrlen;
2125 
2126 		hdr_tb_len = hdr_page->pos - start_hdr;
2127 		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
2128 					     hdr_tb_len, DMA_TO_DEVICE);
2129 		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
2130 			dev_kfree_skb(csum_skb);
2131 			ret = -EINVAL;
2132 			goto out_unmap;
2133 		}
2134 		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
2135 				       hdr_tb_len, false);
2136 		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
2137 					       hdr_tb_len);
2138 
2139 		/* prepare the start_hdr for the next subframe */
2140 		start_hdr = hdr_page->pos;
2141 
2142 		/* put the payload */
2143 		while (data_left) {
2144 			unsigned int size = min_t(unsigned int, tso.size,
2145 						  data_left);
2146 			dma_addr_t tb_phys;
2147 
2148 			if (trans_pcie->sw_csum_tx)
2149 				memcpy(skb_put(csum_skb, size), tso.data, size);
2150 
2151 			tb_phys = dma_map_single(trans->dev, tso.data,
2152 						 size, DMA_TO_DEVICE);
2153 			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
2154 				dev_kfree_skb(csum_skb);
2155 				ret = -EINVAL;
2156 				goto out_unmap;
2157 			}
2158 
2159 			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2160 					       size, false);
2161 			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
2162 						       size);
2163 
2164 			data_left -= size;
2165 			tso_build_data(skb, &tso, size);
2166 		}
2167 
		/* For testing on early hardware only: fold in the software-computed checksum */
2169 		if (trans_pcie->sw_csum_tx) {
2170 			__wsum csum;
2171 
2172 			csum = skb_checksum(csum_skb,
2173 					    skb_checksum_start_offset(csum_skb),
2174 					    csum_skb->len -
2175 					    skb_checksum_start_offset(csum_skb),
2176 					    0);
2177 			dev_kfree_skb(csum_skb);
2178 			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
2179 						hdr_tb_len, DMA_TO_DEVICE);
2180 			tcph->check = csum_fold(csum);
2181 			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
2182 						   hdr_tb_len, DMA_TO_DEVICE);
2183 		}
2184 	}
2185 
	/* re-add the WiFi header and IV */
2187 	skb_push(skb, hdr_len + iv_len);
2188 
2189 	return 0;
2190 
2191 out_unmap:
2192 	iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
2193 	return ret;
2194 }
2195 #else /* CONFIG_INET */
2196 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2197 				   struct iwl_txq *txq, u8 hdr_len,
2198 				   struct iwl_cmd_meta *out_meta,
2199 				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
2200 {
2201 	/* No A-MSDU without CONFIG_INET */
2202 	WARN_ON(1);
2203 
2204 	return -1;
2205 }
2206 #endif /* CONFIG_INET */
2207 
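/*
 * iwl_trans_pcie_tx - queue one frame on a TX queue
 *
 * Builds the TFD for the frame (scratch buffer, TX command + 802.11 header,
 * and data TBs), updates the byte-count table and the write pointer, and
 * stops the queue when it runs low on space.
 */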
2208 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2209 		      struct iwl_device_cmd *dev_cmd, int txq_id)
2210 {
2211 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2212 	struct ieee80211_hdr *hdr;
2213 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2214 	struct iwl_cmd_meta *out_meta;
2215 	struct iwl_txq *txq;
2216 	struct iwl_queue *q;
2217 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2218 	void *tb1_addr;
2219 	u16 len, tb1_len;
2220 	bool wait_write_ptr;
2221 	__le16 fc;
2222 	u8 hdr_len;
2223 	u16 wifi_seq;
2224 	bool amsdu;
2225 
2226 	txq = &trans_pcie->txq[txq_id];
2227 	q = &txq->q;
2228 
2229 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
2230 		      "TX on unused queue %d\n", txq_id))
2231 		return -EINVAL;
2232 
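	/* Software TX checksum (testing path): compute and fill it in here */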
2233 	if (unlikely(trans_pcie->sw_csum_tx &&
2234 		     skb->ip_summed == CHECKSUM_PARTIAL)) {
2235 		int offs = skb_checksum_start_offset(skb);
2236 		int csum_offs = offs + skb->csum_offset;
2237 		__wsum csum;
2238 
2239 		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
2240 			return -1;
2241 
2242 		csum = skb_checksum(skb, offs, skb->len - offs, 0);
2243 		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
2244 
2245 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2246 	}
2247 
2248 	if (skb_is_nonlinear(skb) &&
2249 	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
2250 	    __skb_linearize(skb))
2251 		return -ENOMEM;
2252 
2253 	/* mac80211 always puts the full header into the SKB's head,
2254 	 * so there's no need to check if it's readable there
2255 	 */
2256 	hdr = (struct ieee80211_hdr *)skb->data;
2257 	fc = hdr->frame_control;
2258 	hdr_len = ieee80211_hdrlen(fc);
2259 
2260 	spin_lock(&txq->lock);
2261 
2262 	if (iwl_queue_space(q) < q->high_mark) {
2263 		iwl_stop_queue(trans, txq);
2264 
2265 		/* don't put the packet on the ring, if there is no room */
2266 		if (unlikely(iwl_queue_space(q) < 3)) {
2267 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2268 
2269 			info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] =
2270 				dev_cmd;
2271 			__skb_queue_tail(&txq->overflow_q, skb);
2272 
2273 			spin_unlock(&txq->lock);
2274 			return 0;
2275 		}
2276 	}
2277 
	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement that helps the SCD
	 * parse the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
2283 	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
2284 	WARN_ONCE(txq->ampdu &&
2285 		  (wifi_seq & 0xff) != q->write_ptr,
2286 		  "Q: %d WiFi Seq %d tfdNum %d",
2287 		  txq_id, wifi_seq, q->write_ptr);
2288 
2289 	/* Set up driver data for this TFD */
2290 	txq->entries[q->write_ptr].skb = skb;
2291 	txq->entries[q->write_ptr].cmd = dev_cmd;
2292 
2293 	dev_cmd->hdr.sequence =
2294 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2295 			    INDEX_TO_SEQ(q->write_ptr)));
2296 
2297 	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
2298 	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
2299 		       offsetof(struct iwl_tx_cmd, scratch);
2300 
2301 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
2302 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2303 
2304 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
2305 	out_meta = &txq->entries[q->write_ptr].meta;
2306 	out_meta->flags = 0;
2307 
2308 	/*
2309 	 * The second TB (tb1) points to the remainder of the TX command
2310 	 * and the 802.11 header - dword aligned size
2311 	 * (This calculation modifies the TX command, so do it before the
2312 	 * setup of the first TB)
2313 	 */
2314 	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
2315 	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
2316 	/* do not align A-MSDU to dword as the subframe header aligns it */
2317 	amsdu = ieee80211_is_data_qos(fc) &&
2318 		(*ieee80211_get_qos_ctl(hdr) &
2319 		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
2320 	if (trans_pcie->sw_csum_tx || !amsdu) {
2321 		tb1_len = ALIGN(len, 4);
2322 		/* Tell NIC about any 2-byte padding after MAC header */
2323 		if (tb1_len != len)
2324 			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2325 	} else {
2326 		tb1_len = len;
2327 	}
2328 
2329 	/* The first TB points to the scratchbuf data - min_copy bytes */
2330 	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
2331 	       IWL_HCMD_SCRATCHBUF_SIZE);
2332 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
2333 			       IWL_HCMD_SCRATCHBUF_SIZE, true);
2334 
2335 	/* there must be data left over for TB1 or this code must be changed */
2336 	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
2337 
2338 	/* map the data for TB1 */
2339 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
2340 	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2341 	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2342 		goto out_err;
2343 	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2344 
2345 	if (amsdu) {
2346 		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
2347 						     out_meta, dev_cmd,
2348 						     tb1_len)))
2349 			goto out_err;
2350 	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2351 				       out_meta, dev_cmd, tb1_len))) {
2352 		goto out_err;
2353 	}
2354 
2355 	/* Set up entry for this TFD in Tx byte-count array */
2356 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
2357 
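	/* Defer the write pointer update if more fragments will follow */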
2358 	wait_write_ptr = ieee80211_has_morefrags(fc);
2359 
2360 	/* start timer if queue currently empty */
2361 	if (q->read_ptr == q->write_ptr) {
2362 		if (txq->wd_timeout) {
			/*
			 * If the TXQ is active, arm the timer now; if it is
			 * frozen, store the timeout in the remainder so that
			 * the timer is armed with the right value when the
			 * station wakes up.
			 */
2369 			if (!txq->frozen)
2370 				mod_timer(&txq->stuck_timer,
2371 					  jiffies + txq->wd_timeout);
2372 			else
2373 				txq->frozen_expiry_remainder = txq->wd_timeout;
2374 		}
2375 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
2376 		iwl_trans_ref(trans);
2377 	}
2378 
2379 	/* Tell device the write index *just past* this latest filled TFD */
2380 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
2381 	if (!wait_write_ptr)
2382 		iwl_pcie_txq_inc_wr_ptr(trans, txq);
2383 
2384 	/*
2385 	 * At this point the frame is "transmitted" successfully
2386 	 * and we will get a TX status notification eventually.
2387 	 */
2388 	spin_unlock(&txq->lock);
2389 	return 0;
2390 out_err:
2391 	spin_unlock(&txq->lock);
2392 	return -1;
2393 }
2394