xref: /openbmc/linux/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c (revision 7a846d3c43b0b6d04300be9ba666b102b57a391a)
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2017 Intel Deutschland GmbH
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * BSD LICENSE
20  *
21  * Copyright(c) 2017 Intel Deutschland GmbH
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  *
28  *  * Redistributions of source code must retain the above copyright
29  *    notice, this list of conditions and the following disclaimer.
30  *  * Redistributions in binary form must reproduce the above copyright
31  *    notice, this list of conditions and the following disclaimer in
32  *    the documentation and/or other materials provided with the
33  *    distribution.
34  *  * Neither the name Intel Corporation nor the names of its
35  *    contributors may be used to endorse or promote products derived
36  *    from this software without specific prior written permission.
37  *
38  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49  *
50  *****************************************************************************/
51 #include <linux/pm_runtime.h>
52 #include <net/tso.h>
53 
54 #include "iwl-debug.h"
55 #include "iwl-csr.h"
56 #include "iwl-io.h"
57 #include "internal.h"
58 #include "fw/api/tx.h"
59 
60 /*
61  * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
62  */
63 void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
64 {
65 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
66 	int txq_id;
67 
68 	/*
69 	 * This function can be called before the op_mode disabled the
70 	 * queues. This happens when we have an rfkill interrupt.
71 	 * Since we stop Tx altogether - mark the queues as stopped.
72 	 */
73 	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
74 	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
75 
76 	/* Unmap DMA from host system and free skb's */
77 	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
78 		if (!trans_pcie->txq[txq_id])
79 			continue;
80 		iwl_pcie_gen2_txq_unmap(trans, txq_id);
81 	}
82 }
83 
84 /*
85  * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
86  */
87 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
88 					  int num_tbs)
89 {
90 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
91 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
92 	u8 filled_tfd_size, num_fetch_chunks;
93 	u16 len = byte_cnt;
94 	__le16 bc_ent;
95 
96 	len = DIV_ROUND_UP(len, 4);
97 
98 	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
99 		return;
100 
101 	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
102 				   num_tbs * sizeof(struct iwl_tfh_tb);
103 	/*
104 	 * filled_tfd_size contains the number of filled bytes in the TFD.
105 	 * Dividing it by 64 will give the number of chunks to fetch
106 	 * to SRAM - 0 for one chunk, 1 for 2, and so on.
107 	 * If, for example, the TFD contains only 3 TBs then 32 bytes
108 	 * of the TFD are used, and only one chunk of 64 bytes should
109 	 * be fetched.
110 	 */
111 	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
112 
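	/*
	 * Byte-count entry layout (as implied by the checks above): the
	 * length in dwords fills the low 12 bits and the fetch-chunk count
	 * goes into the bits above them.
	 */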
113 	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
114 	scd_bc_tbl->tfd_offset[idx] = bc_ent;
115 }
116 
117 /*
118  * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
119  */
120 static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
121 					 struct iwl_txq *txq)
122 {
123 	lockdep_assert_held(&txq->lock);
124 
125 	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
126 
127 	/*
128 	 * if not in power-save mode, uCode will never sleep when we're
129 	 * trying to tx (during RFKILL, we're not trying to tx).
130 	 */
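	/* low 16 bits: write pointer, high 16 bits: queue id */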
131 	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
132 }
133 
134 static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
135 				    struct iwl_tfh_tfd *tfd)
136 {
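	/* only the low 5 bits of num_tbs carry the TB count */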
137 	return le16_to_cpu(tfd->num_tbs) & 0x1f;
138 }
139 
140 static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
141 				    struct iwl_cmd_meta *meta,
142 				    struct iwl_tfh_tfd *tfd)
143 {
144 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
145 	int i, num_tbs;
146 
147 	/* Sanity check on number of chunks */
148 	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
149 
150 	if (num_tbs > trans_pcie->max_tbs) {
151 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
152 		return;
153 	}
154 
155 	/* first TB is never freed - it's the bidirectional DMA data */
156 	for (i = 1; i < num_tbs; i++) {
157 		if (meta->tbs & BIT(i))
158 			dma_unmap_page(trans->dev,
159 				       le64_to_cpu(tfd->tbs[i].addr),
160 				       le16_to_cpu(tfd->tbs[i].tb_len),
161 				       DMA_TO_DEVICE);
162 		else
163 			dma_unmap_single(trans->dev,
164 					 le64_to_cpu(tfd->tbs[i].addr),
165 					 le16_to_cpu(tfd->tbs[i].tb_len),
166 					 DMA_TO_DEVICE);
167 	}
168 
169 	tfd->num_tbs = 0;
170 }
171 
172 static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
173 {
174 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
175 	 * idx is bounded by n_window
176 	 */
177 	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
178 
179 	lockdep_assert_held(&txq->lock);
180 
181 	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
182 				iwl_pcie_get_tfd(trans, txq, idx));
183 
184 	/* free SKB */
185 	if (txq->entries) {
186 		struct sk_buff *skb;
187 
188 		skb = txq->entries[idx].skb;
189 
190 		/* Can be called from irqs-disabled context
191 		 * If skb is not NULL, it means that the whole queue is being
192 		 * freed and that the queue is not empty - free the skb
193 		 */
194 		if (skb) {
195 			iwl_op_mode_free_skb(trans->op_mode, skb);
196 			txq->entries[idx].skb = NULL;
197 		}
198 	}
199 }
200 
201 static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
202 				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
203 				u16 len)
204 {
205 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
206 	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
207 	struct iwl_tfh_tb *tb = &tfd->tbs[idx];
208 
209 	/* Each TFD can point to a maximum of max_tbs Tx buffers */
210 	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
211 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
212 			trans_pcie->max_tbs);
213 		return -EINVAL;
214 	}
215 
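	/*
	 * TB addresses are 64 bits and not necessarily naturally aligned
	 * inside the TFD, hence the unaligned store.
	 */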
216 	put_unaligned_le64(addr, &tb->addr);
217 	tb->tb_len = cpu_to_le16(len);
218 
219 	tfd->num_tbs = cpu_to_le16(idx + 1);
220 
221 	return idx;
222 }
223 
224 static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
225 				     struct sk_buff *skb,
226 				     struct iwl_tfh_tfd *tfd, int start_len,
227 				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
228 {
229 #ifdef CONFIG_INET
230 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
231 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
232 	struct ieee80211_hdr *hdr = (void *)skb->data;
233 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
234 	unsigned int mss = skb_shinfo(skb)->gso_size;
235 	u16 length, iv_len, amsdu_pad;
236 	u8 *start_hdr;
237 	struct iwl_tso_hdr_page *hdr_page;
238 	struct page **page_ptr;
239 	struct tso_t tso;
240 
241 	/* if the packet is protected, then it must be CCMP or GCMP */
242 	iv_len = ieee80211_has_protected(hdr->frame_control) ?
243 		IEEE80211_CCMP_HDR_LEN : 0;
244 
245 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
246 			     &dev_cmd->hdr, start_len, 0);
247 
248 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
249 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
250 	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
251 	amsdu_pad = 0;
252 
253 	/* total amount of header we may need for this A-MSDU */
254 	hdr_room = DIV_ROUND_UP(total_len, mss) *
255 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
256 
257 	/* Our device supports 9 segments at most; they will fit in 1 page */
258 	hdr_page = get_page_hdr(trans, hdr_room);
259 	if (!hdr_page)
260 		return -ENOMEM;
261 
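	/*
	 * Take a reference on the header page and remember it in skb->cb;
	 * it is released later via iwl_pcie_free_tso_page() once the frame
	 * is reclaimed.
	 */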
262 	get_page(hdr_page->page);
263 	start_hdr = hdr_page->pos;
264 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
265 	*page_ptr = hdr_page->page;
266 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
267 	hdr_page->pos += iv_len;
268 
269 	/*
270 	 * Pull the ieee80211 header + IV to be able to use TSO core,
271 	 * we will restore it for the tx_status flow.
272 	 */
273 	skb_pull(skb, hdr_len + iv_len);
274 
275 	/*
276 	 * Remove the length of all the headers that we don't actually
277 	 * have in the MPDU by themselves, but that we duplicate into
278 	 * all the different MSDUs inside the A-MSDU.
279 	 */
280 	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
281 
282 	tso_start(skb, &tso);
283 
284 	while (total_len) {
285 		/* this is the data left for this subframe */
286 		unsigned int data_left = min_t(unsigned int, mss, total_len);
287 		struct sk_buff *csum_skb = NULL;
288 		unsigned int tb_len;
289 		dma_addr_t tb_phys;
290 		u8 *subf_hdrs_start = hdr_page->pos;
291 
292 		total_len -= data_left;
293 
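		/*
		 * Apply the padding owed from the previous subframe, then
		 * work out how much this subframe (ethhdr + SNAP/IP/TCP
		 * headers + payload) will need to stay dword aligned.
		 */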
294 		memset(hdr_page->pos, 0, amsdu_pad);
295 		hdr_page->pos += amsdu_pad;
296 		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
297 				  data_left)) & 0x3;
298 		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
299 		hdr_page->pos += ETH_ALEN;
300 		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
301 		hdr_page->pos += ETH_ALEN;
302 
303 		length = snap_ip_tcp_hdrlen + data_left;
304 		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
305 		hdr_page->pos += sizeof(length);
306 
307 		/*
308 		 * This will copy the SNAP as well, which will be considered
309 		 * part of the MAC header.
310 		 */
311 		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
312 
313 		hdr_page->pos += snap_ip_tcp_hdrlen;
314 
315 		tb_len = hdr_page->pos - start_hdr;
316 		tb_phys = dma_map_single(trans->dev, start_hdr,
317 					 tb_len, DMA_TO_DEVICE);
318 		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
319 			dev_kfree_skb(csum_skb);
320 			goto out_err;
321 		}
322 		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
323 		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
324 		/* add this subframe's headers' length to the tx_cmd */
325 		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
326 
327 		/* prepare the start_hdr for the next subframe */
328 		start_hdr = hdr_page->pos;
329 
330 		/* put the payload */
331 		while (data_left) {
332 			tb_len = min_t(unsigned int, tso.size, data_left);
333 			tb_phys = dma_map_single(trans->dev, tso.data,
334 						 tb_len, DMA_TO_DEVICE);
335 			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
336 				dev_kfree_skb(csum_skb);
337 				goto out_err;
338 			}
339 			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
340 			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
341 						       tb_len);
342 
343 			data_left -= tb_len;
344 			tso_build_data(skb, &tso, tb_len);
345 		}
346 	}
347 
348 	/* re-add the WiFi header and IV */
349 	skb_push(skb, hdr_len + iv_len);
350 
351 	return 0;
352 
353 out_err:
354 #endif
355 	return -EINVAL;
356 }
357 
358 static
359 struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
360 					    struct iwl_txq *txq,
361 					    struct iwl_device_cmd *dev_cmd,
362 					    struct sk_buff *skb,
363 					    struct iwl_cmd_meta *out_meta)
364 {
365 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
366 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
367 	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
368 	dma_addr_t tb_phys;
369 	bool amsdu;
370 	int i, len, tb1_len, tb2_len, hdr_len;
371 	void *tb1_addr;
372 
373 	memset(tfd, 0, sizeof(*tfd));
374 
375 	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
376 		(*ieee80211_get_qos_ctl(hdr) &
377 		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
378 
379 	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
380 	/* The first TB points to bi-directional DMA data */
381 	if (!amsdu)
382 		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
383 		       IWL_FIRST_TB_SIZE);
384 
385 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
386 
387 	/* there must be data left over for TB1 or this code must be changed */
388 	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
389 
390 	/*
391 	 * The second TB (tb1) points to the remainder of the TX command
392 	 * and the 802.11 header - dword aligned size
393 	 * (This calculation modifies the TX command, so do it before the
394 	 * setup of the first TB)
395 	 */
396 	len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
397 	      ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
398 
399 	/* do not align A-MSDU to dword as the subframe header aligns it */
400 	if (amsdu)
401 		tb1_len = len;
402 	else
403 		tb1_len = ALIGN(len, 4);
404 
405 	/* map the data for TB1 */
406 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
407 	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
408 	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
409 		goto out_err;
410 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
411 
412 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
413 
414 	if (amsdu) {
415 		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
416 					      tb1_len + IWL_FIRST_TB_SIZE,
417 					      hdr_len, dev_cmd))
418 			goto out_err;
419 
420 		/*
421 		 * building the A-MSDU might have changed this data, so memcpy
422 		 * it now
423 		 */
424 		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
425 		       IWL_FIRST_TB_SIZE);
426 		return tfd;
427 	}
428 
429 	/* set up TFD's third entry to point to remainder of skb's head */
430 	tb2_len = skb_headlen(skb) - hdr_len;
431 
432 	if (tb2_len > 0) {
433 		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
434 					 tb2_len, DMA_TO_DEVICE);
435 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
436 			goto out_err;
437 		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
438 	}
439 
440 	/* set up the remaining entries to point to the data */
441 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
442 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
443 		int tb_idx;
444 
445 		if (!skb_frag_size(frag))
446 			continue;
447 
448 		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
449 					   skb_frag_size(frag), DMA_TO_DEVICE);
450 
451 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
452 			goto out_err;
453 		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
454 					      skb_frag_size(frag));
455 
456 		out_meta->tbs |= BIT(tb_idx);
457 	}
458 
459 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
460 			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
461 	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
462 
463 	return tfd;
464 
465 out_err:
466 	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
467 	return NULL;
468 }
469 
470 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
471 			   struct iwl_device_cmd *dev_cmd, int txq_id)
472 {
473 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
474 	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
475 	struct iwl_cmd_meta *out_meta;
476 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
477 	int idx;
478 	void *tfd;
479 
480 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
481 		      "TX on unused queue %d\n", txq_id))
482 		return -EINVAL;
483 
484 	if (skb_is_nonlinear(skb) &&
485 	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
486 	    __skb_linearize(skb))
487 		return -ENOMEM;
488 
489 	spin_lock(&txq->lock);
490 
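	/*
	 * If the queue is running low on space, stop it towards the stack;
	 * if there really is no room left, park the frame on the overflow
	 * queue instead of the ring, so it can be handled later.
	 */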
491 	if (iwl_queue_space(txq) < txq->high_mark) {
492 		iwl_stop_queue(trans, txq);
493 
494 		/* don't put the packet on the ring, if there is no room */
495 		if (unlikely(iwl_queue_space(txq) < 3)) {
496 			struct iwl_device_cmd **dev_cmd_ptr;
497 
498 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
499 					       trans_pcie->dev_cmd_offs);
500 
501 			*dev_cmd_ptr = dev_cmd;
502 			__skb_queue_tail(&txq->overflow_q, skb);
503 			spin_unlock(&txq->lock);
504 			return 0;
505 		}
506 	}
507 
508 	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
509 
510 	/* Set up driver data for this TFD */
511 	txq->entries[idx].skb = skb;
512 	txq->entries[idx].cmd = dev_cmd;
513 
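	/*
	 * The sequence number encodes both the queue and the index within
	 * it, presumably so the TX response can be matched back to this
	 * entry.
	 */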
514 	dev_cmd->hdr.sequence =
515 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
516 			    INDEX_TO_SEQ(idx)));
517 
518 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
519 	out_meta = &txq->entries[idx].meta;
520 	out_meta->flags = 0;
521 
522 	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
523 	if (!tfd) {
524 		spin_unlock(&txq->lock);
525 		return -1;
526 	}
527 
528 	/* Set up entry for this TFD in Tx byte-count array */
529 	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
530 				      iwl_pcie_gen2_get_num_tbs(trans, tfd));
531 
532 	/* start timer if queue currently empty */
533 	if (txq->read_ptr == txq->write_ptr) {
534 		if (txq->wd_timeout)
535 			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
536 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
537 		iwl_trans_ref(trans);
538 	}
539 
540 	/* Tell device the write index *just past* this latest filled TFD */
541 	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
542 	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
543 	/*
544 	 * At this point the frame is "transmitted" successfully
545 	 * and we will get a TX status notification eventually.
546 	 */
547 	spin_unlock(&txq->lock);
548 	return 0;
549 }
550 
551 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
552 
553 /*
554  * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
555  * @trans: the transport
556  * @cmd: a pointer to the ucode command structure
557  *
558  * The function returns < 0 values to indicate the operation
559  * failed. On success, it returns the index (>= 0) of the command in the
560  * command queue.
561  */
562 static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
563 				      struct iwl_host_cmd *cmd)
564 {
565 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
566 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
567 	struct iwl_device_cmd *out_cmd;
568 	struct iwl_cmd_meta *out_meta;
569 	unsigned long flags;
570 	void *dup_buf = NULL;
571 	dma_addr_t phys_addr;
572 	int i, cmd_pos, idx;
573 	u16 copy_size, cmd_size, tb0_size;
574 	bool had_nocopy = false;
575 	u8 group_id = iwl_cmd_groupid(cmd->id);
576 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
577 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
578 	struct iwl_tfh_tfd *tfd;
579 
580 	copy_size = sizeof(struct iwl_cmd_header_wide);
581 	cmd_size = sizeof(struct iwl_cmd_header_wide);
582 
583 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
584 		cmddata[i] = cmd->data[i];
585 		cmdlen[i] = cmd->len[i];
586 
587 		if (!cmd->len[i])
588 			continue;
589 
590 		/* need at least IWL_FIRST_TB_SIZE copied */
591 		if (copy_size < IWL_FIRST_TB_SIZE) {
592 			int copy = IWL_FIRST_TB_SIZE - copy_size;
593 
594 			if (copy > cmdlen[i])
595 				copy = cmdlen[i];
596 			cmdlen[i] -= copy;
597 			cmddata[i] += copy;
598 			copy_size += copy;
599 		}
600 
601 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
602 			had_nocopy = true;
603 			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
604 				idx = -EINVAL;
605 				goto free_dup_buf;
606 			}
607 		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
608 			/*
609 			 * This is also a chunk that isn't copied
610 			 * to the static buffer so set had_nocopy.
611 			 */
612 			had_nocopy = true;
613 
614 			/* only allowed once */
615 			if (WARN_ON(dup_buf)) {
616 				idx = -EINVAL;
617 				goto free_dup_buf;
618 			}
619 
620 			dup_buf = kmemdup(cmddata[i], cmdlen[i],
621 					  GFP_ATOMIC);
622 			if (!dup_buf)
623 				return -ENOMEM;
624 		} else {
625 			/* NOCOPY must not be followed by normal! */
626 			if (WARN_ON(had_nocopy)) {
627 				idx = -EINVAL;
628 				goto free_dup_buf;
629 			}
630 			copy_size += cmdlen[i];
631 		}
632 		cmd_size += cmd->len[i];
633 	}
634 
635 	/*
636 	 * If any of the command structures end up being larger than the
637 	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
638 	 * separate TFDs, then we will need to increase the size of the buffers
639 	 */
640 	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
641 		 "Command %s (%#x) is too large (%d bytes)\n",
642 		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
643 		idx = -EINVAL;
644 		goto free_dup_buf;
645 	}
646 
647 	spin_lock_bh(&txq->lock);
648 
649 	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
650 	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
651 	memset(tfd, 0, sizeof(*tfd));
652 
653 	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
654 		spin_unlock_bh(&txq->lock);
655 
656 		IWL_ERR(trans, "No space in command queue\n");
657 		iwl_op_mode_cmd_queue_full(trans->op_mode);
658 		idx = -ENOSPC;
659 		goto free_dup_buf;
660 	}
661 
662 	out_cmd = txq->entries[idx].cmd;
663 	out_meta = &txq->entries[idx].meta;
664 
665 	/* re-initialize to NULL */
666 	memset(out_meta, 0, sizeof(*out_meta));
667 	if (cmd->flags & CMD_WANT_SKB)
668 		out_meta->source = cmd;
669 
670 	/* set up the header */
671 	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
672 	out_cmd->hdr_wide.group_id = group_id;
673 	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
674 	out_cmd->hdr_wide.length =
675 		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
676 	out_cmd->hdr_wide.reserved = 0;
677 	out_cmd->hdr_wide.sequence =
678 		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
679 					 INDEX_TO_SEQ(txq->write_ptr));
680 
681 	cmd_pos = sizeof(struct iwl_cmd_header_wide);
682 	copy_size = sizeof(struct iwl_cmd_header_wide);
683 
684 	/* and copy the data that needs to be copied */
685 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
686 		int copy;
687 
688 		if (!cmd->len[i])
689 			continue;
690 
691 		/* copy everything if not nocopy/dup */
692 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
693 					   IWL_HCMD_DFL_DUP))) {
694 			copy = cmd->len[i];
695 
696 			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
697 			cmd_pos += copy;
698 			copy_size += copy;
699 			continue;
700 		}
701 
702 		/*
703 		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
704 		 * in total (for bi-directional DMA), but copy up to what
705 		 * we can fit into the payload for debug dump purposes.
706 		 */
707 		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
708 
709 		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
710 		cmd_pos += copy;
711 
712 		/* However, treat copy_size the proper way; we need it below */
713 		if (copy_size < IWL_FIRST_TB_SIZE) {
714 			copy = IWL_FIRST_TB_SIZE - copy_size;
715 
716 			if (copy > cmd->len[i])
717 				copy = cmd->len[i];
718 			copy_size += copy;
719 		}
720 	}
721 
722 	IWL_DEBUG_HC(trans,
723 		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
724 		     iwl_get_cmd_string(trans, cmd->id), group_id,
725 		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
726 		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
727 
728 	/* start the TFD with the minimum copy bytes */
729 	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
730 	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
731 	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
732 			     tb0_size);
733 
734 	/* map first command fragment, if any remains */
735 	if (copy_size > tb0_size) {
736 		phys_addr = dma_map_single(trans->dev,
737 					   ((u8 *)&out_cmd->hdr) + tb0_size,
738 					   copy_size - tb0_size,
739 					   DMA_TO_DEVICE);
740 		if (dma_mapping_error(trans->dev, phys_addr)) {
741 			idx = -ENOMEM;
742 			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
743 			goto out;
744 		}
745 		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
746 				     copy_size - tb0_size);
747 	}
748 
749 	/* map the remaining (adjusted) nocopy/dup fragments */
750 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
751 		const void *data = cmddata[i];
752 
753 		if (!cmdlen[i])
754 			continue;
755 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
756 					   IWL_HCMD_DFL_DUP)))
757 			continue;
758 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
759 			data = dup_buf;
760 		phys_addr = dma_map_single(trans->dev, (void *)data,
761 					   cmdlen[i], DMA_TO_DEVICE);
762 		if (dma_mapping_error(trans->dev, phys_addr)) {
763 			idx = -ENOMEM;
764 			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
765 			goto out;
766 		}
767 		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
768 	}
769 
770 	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
771 	out_meta->flags = cmd->flags;
772 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
773 		kzfree(txq->entries[idx].free_buf);
774 	txq->entries[idx].free_buf = dup_buf;
775 
776 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
777 
778 	/* start timer if queue currently empty */
779 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
780 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
781 
782 	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
783 	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
784 	    !trans_pcie->ref_cmd_in_flight) {
785 		trans_pcie->ref_cmd_in_flight = true;
786 		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
787 		iwl_trans_ref(trans);
788 	}
789 	/* Increment and update queue's write index */
790 	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
791 	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
792 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
793 
794 out:
795 	spin_unlock_bh(&txq->lock);
796 free_dup_buf:
797 	if (idx < 0)
798 		kfree(dup_buf);
799 	return idx;
800 }
801 
802 #define HOST_COMPLETE_TIMEOUT	(2 * HZ)
803 
804 static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
805 					struct iwl_host_cmd *cmd)
806 {
807 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
808 	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
809 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
810 	int cmd_idx;
811 	int ret;
812 
813 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
814 
815 	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
816 				  &trans->status),
817 		 "Command %s: a command is already active!\n", cmd_str))
818 		return -EIO;
819 
820 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
821 
822 	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
823 		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
824 				 pm_runtime_active(&trans_pcie->pci_dev->dev),
825 				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
826 		if (!ret) {
827 			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
828 			return -ETIMEDOUT;
829 		}
830 	}
831 
832 	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
833 	if (cmd_idx < 0) {
834 		ret = cmd_idx;
835 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
836 		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
837 			cmd_str, ret);
838 		return ret;
839 	}
840 
841 	ret = wait_event_timeout(trans_pcie->wait_command_queue,
842 				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
843 					   &trans->status),
844 				 HOST_COMPLETE_TIMEOUT);
845 	if (!ret) {
846 		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
847 			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
848 
849 		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
850 			txq->read_ptr, txq->write_ptr);
851 
852 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
853 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
854 			       cmd_str);
855 		ret = -ETIMEDOUT;
856 
857 		iwl_force_nmi(trans);
858 		iwl_trans_fw_error(trans);
859 
860 		goto cancel;
861 	}
862 
863 	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
864 		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
865 		dump_stack();
866 		ret = -EIO;
867 		goto cancel;
868 	}
869 
870 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
871 	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
872 		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
873 		ret = -ERFKILL;
874 		goto cancel;
875 	}
876 
877 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
878 		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
879 		ret = -EIO;
880 		goto cancel;
881 	}
882 
883 	return 0;
884 
885 cancel:
886 	if (cmd->flags & CMD_WANT_SKB) {
887 		/*
888 		 * Cancel the CMD_WANT_SKB flag for the cmd in the
889 		 * TX cmd queue. Otherwise in case the cmd comes
890 		 * in later, it will possibly set an invalid
891 		 * address (cmd->meta.source).
892 		 */
893 		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
894 	}
895 
896 	if (cmd->resp_pkt) {
897 		iwl_free_resp(cmd);
898 		cmd->resp_pkt = NULL;
899 	}
900 
901 	return ret;
902 }
903 
904 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
905 				  struct iwl_host_cmd *cmd)
906 {
907 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
908 	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
909 		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
910 				  cmd->id);
911 		return -ERFKILL;
912 	}
913 
914 	if (cmd->flags & CMD_ASYNC) {
915 		int ret;
916 
917 		/* An asynchronous command can not expect an SKB to be set. */
918 		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
919 			return -EINVAL;
920 
921 		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
922 		if (ret < 0) {
923 			IWL_ERR(trans,
924 				"Error sending %s: enqueue_hcmd failed: %d\n",
925 				iwl_get_cmd_string(trans, cmd->id), ret);
926 			return ret;
927 		}
928 		return 0;
929 	}
930 
931 	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
932 }
933 
934 /*
935  * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
936  */
937 void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
938 {
939 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
940 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
941 
942 	spin_lock_bh(&txq->lock);
943 	while (txq->write_ptr != txq->read_ptr) {
944 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
945 				   txq_id, txq->read_ptr);
946 
947 		if (txq_id != trans_pcie->cmd_queue) {
948 			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
949 			struct sk_buff *skb = txq->entries[idx].skb;
950 
951 			if (WARN_ON_ONCE(!skb))
952 				continue;
953 
954 			iwl_pcie_free_tso_page(trans_pcie, skb);
955 		}
956 		iwl_pcie_gen2_free_tfd(trans, txq);
957 		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
958 
959 		if (txq->read_ptr == txq->write_ptr) {
960 			unsigned long flags;
961 
962 			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
963 			if (txq_id != trans_pcie->cmd_queue) {
964 				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
965 					      txq->id);
966 				iwl_trans_unref(trans);
967 			} else if (trans_pcie->ref_cmd_in_flight) {
968 				trans_pcie->ref_cmd_in_flight = false;
969 				IWL_DEBUG_RPM(trans,
970 					      "clear ref_cmd_in_flight\n");
971 				iwl_trans_unref(trans);
972 			}
973 			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
974 		}
975 	}
976 
977 	while (!skb_queue_empty(&txq->overflow_q)) {
978 		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
979 
980 		iwl_op_mode_free_skb(trans->op_mode, skb);
981 	}
982 
983 	spin_unlock_bh(&txq->lock);
984 
985 	/* just in case - this queue may have been stopped */
986 	iwl_wake_queue(trans, txq);
987 }
988 
989 static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
990 					  struct iwl_txq *txq)
991 {
992 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
993 	struct device *dev = trans->dev;
994 
995 	/* De-alloc circular buffer of TFDs */
996 	if (txq->tfds) {
997 		dma_free_coherent(dev,
998 				  trans_pcie->tfd_size * txq->n_window,
999 				  txq->tfds, txq->dma_addr);
1000 		dma_free_coherent(dev,
1001 				  sizeof(*txq->first_tb_bufs) * txq->n_window,
1002 				  txq->first_tb_bufs, txq->first_tb_dma);
1003 	}
1004 
1005 	kfree(txq->entries);
1006 	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
1007 	kfree(txq);
1008 }
1009 
1010 /*
1011  * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
1012  * @txq: Transmit queue to deallocate.
1013  *
1014  * Empty queue by removing and destroying all BD's.
1015  * Free all buffers.
1016  * 0-fill, but do not free "txq" descriptor structure.
1017  */
1018 static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
1019 {
1020 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1021 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
1022 	int i;
1023 
1024 	if (WARN_ON(!txq))
1025 		return;
1026 
1027 	iwl_pcie_gen2_txq_unmap(trans, txq_id);
1028 
1029 	/* De-alloc array of command/tx buffers */
1030 	if (txq_id == trans_pcie->cmd_queue)
1031 		for (i = 0; i < txq->n_window; i++) {
1032 			kzfree(txq->entries[i].cmd);
1033 			kzfree(txq->entries[i].free_buf);
1034 		}
1035 	del_timer_sync(&txq->stuck_timer);
1036 
1037 	iwl_pcie_gen2_txq_free_memory(trans, txq);
1038 
1039 	trans_pcie->txq[txq_id] = NULL;
1040 
1041 	clear_bit(txq_id, trans_pcie->queue_used);
1042 }
1043 
1044 int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
1045 				 struct iwl_tx_queue_cfg_cmd *cmd,
1046 				 int cmd_id, int size,
1047 				 unsigned int timeout)
1048 {
1049 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1050 	struct iwl_tx_queue_cfg_rsp *rsp;
1051 	struct iwl_txq *txq;
1052 	struct iwl_host_cmd hcmd = {
1053 		.id = cmd_id,
1054 		.len = { sizeof(*cmd) },
1055 		.data = { cmd, },
1056 		.flags = CMD_WANT_SKB,
1057 	};
1058 	int ret, qid;
1059 	u32 wr_ptr;
1060 
1061 	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1062 	if (!txq)
1063 		return -ENOMEM;
1064 	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
1065 				     sizeof(struct iwlagn_scd_bc_tbl));
1066 	if (ret) {
1067 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1068 		kfree(txq);
1069 		return -ENOMEM;
1070 	}
1071 
1072 	ret = iwl_pcie_txq_alloc(trans, txq, size, false);
1073 	if (ret) {
1074 		IWL_ERR(trans, "Tx queue alloc failed\n");
1075 		goto error;
1076 	}
1077 	ret = iwl_pcie_txq_init(trans, txq, size, false);
1078 	if (ret) {
1079 		IWL_ERR(trans, "Tx queue init failed\n");
1080 		goto error;
1081 	}
1082 
1083 	txq->wd_timeout = msecs_to_jiffies(timeout);
1084 
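	/*
	 * Hand the firmware the DMA addresses of the TFD ring and the
	 * byte-count table, along with the encoded ring size.
	 */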
1085 	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
1086 	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1087 	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1088 
1089 	ret = iwl_trans_send_cmd(trans, &hcmd);
1090 	if (ret)
1091 		goto error;
1092 
1093 	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
1094 		ret = -EINVAL;
1095 		goto error_free_resp;
1096 	}
1097 
1098 	rsp = (void *)hcmd.resp_pkt->data;
1099 	qid = le16_to_cpu(rsp->queue_number);
1100 	wr_ptr = le16_to_cpu(rsp->write_pointer);
1101 
1102 	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
1103 		WARN_ONCE(1, "queue index %d unsupported", qid);
1104 		ret = -EIO;
1105 		goto error_free_resp;
1106 	}
1107 
1108 	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
1109 		WARN_ONCE(1, "queue %d already used", qid);
1110 		ret = -EIO;
1111 		goto error_free_resp;
1112 	}
1113 
1114 	txq->id = qid;
1115 	trans_pcie->txq[qid] = txq;
1116 	wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
1117 
1118 	/* Place first TFD at index corresponding to start sequence number */
1119 	txq->read_ptr = wr_ptr;
1120 	txq->write_ptr = wr_ptr;
1121 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1122 			   (txq->write_ptr) | (qid << 16));
1123 	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1124 
1125 	iwl_free_resp(&hcmd);
1126 	return qid;
1127 
1128 error_free_resp:
1129 	iwl_free_resp(&hcmd);
1130 error:
1131 	iwl_pcie_gen2_txq_free_memory(trans, txq);
1132 	return ret;
1133 }
1134 
1135 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
1136 {
1137 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1138 
1139 	/*
1140 	 * Upon HW Rfkill - we stop the device, and then stop the queues
1141 	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1142 	 * allow the op_mode to call txq_disable after it already called
1143 	 * stop_device.
1144 	 */
1145 	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
1146 		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1147 			  "queue %d not used", queue);
1148 		return;
1149 	}
1150 
1151 	iwl_pcie_gen2_txq_unmap(trans, queue);
1152 
1153 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1154 }
1155 
1156 void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
1157 {
1158 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1159 	int i;
1160 
1161 	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1162 
1163 	/* Free all TX queues */
1164 	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
1165 		if (!trans_pcie->txq[i])
1166 			continue;
1167 
1168 		iwl_pcie_gen2_txq_free(trans, i);
1169 	}
1170 }
1171 
1172 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
1173 {
1174 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1175 	struct iwl_txq *cmd_queue;
1176 	int txq_id = trans_pcie->cmd_queue, ret;
1177 
1178 	/* alloc and init the command queue */
1179 	if (!trans_pcie->txq[txq_id]) {
1180 		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
1181 		if (!cmd_queue) {
1182 			IWL_ERR(trans, "Not enough memory for command queue\n");
1183 			return -ENOMEM;
1184 		}
1185 		trans_pcie->txq[txq_id] = cmd_queue;
1186 		ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
1187 		if (ret) {
1188 			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1189 			goto error;
1190 		}
1191 	} else {
1192 		cmd_queue = trans_pcie->txq[txq_id];
1193 	}
1194 
1195 	ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
1196 	if (ret) {
1197 		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1198 		goto error;
1199 	}
1200 	trans_pcie->txq[txq_id]->id = txq_id;
1201 	set_bit(txq_id, trans_pcie->queue_used);
1202 
1203 	return 0;
1204 
1205 error:
1206 	iwl_pcie_gen2_tx_free(trans);
1207 	return ret;
1208 }
1209 
1210