Lines Matching +full:num +full:- +full:txq

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2020-2023 Intel Corporation
8 #include "iwl-debug.h"
9 #include "iwl-io.h"
15 #include "iwl-fh.h"
16 #include "iwl-scd.h"
20 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
23 struct iwl_txq *txq, u16 byte_cnt, in iwl_pcie_gen2_update_byte_tbl() argument
26 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_pcie_gen2_update_byte_tbl()
31 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) in iwl_pcie_gen2_update_byte_tbl()
39 * to SRAM - 0 for one chunk, 1 for 2 and so on. in iwl_pcie_gen2_update_byte_tbl()
44 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; in iwl_pcie_gen2_update_byte_tbl()
46 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_pcie_gen2_update_byte_tbl()
47 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; in iwl_pcie_gen2_update_byte_tbl()
50 WARN_ON(trans->txqs.bc_table_dword); in iwl_pcie_gen2_update_byte_tbl()
55 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; in iwl_pcie_gen2_update_byte_tbl()
58 WARN_ON(!trans->txqs.bc_table_dword); in iwl_pcie_gen2_update_byte_tbl()
62 scd_bc_tbl->tfd_offset[idx] = bc_ent; in iwl_pcie_gen2_update_byte_tbl()
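
The fragments above encode the filled TFD size into the scheduler byte-count table entry. A minimal, self-contained sketch of that arithmetic in plain C (the 64-byte fetch granularity comes from the visible comment; the bit positions and dword conversion are assumptions, not verbatim driver code):

#include <stdint.h>

/* Sketch: byte-count table entry as hinted by the fragments above. */
static uint16_t sketch_bc_entry(uint16_t byte_cnt, uint16_t filled_tfd_size,
                                int gen3)
{
        /* hardware fetches the TFD in 64-byte chunks; the field stores
         * chunks - 1: 0 for one chunk, 1 for two, and so on */
        uint16_t num_fetch_chunks = (filled_tfd_size + 63) / 64 - 1;

        if (gen3)                       /* AX210+ table: length kept in bytes */
                return byte_cnt | (num_fetch_chunks << 14);
        /* older gen2 table: length kept in dwords (bc_table_dword) */
        return (uint16_t)((byte_cnt + 3) / 4) | (num_fetch_chunks << 12);
}
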
67 * iwl_txq_inc_wr_ptr - Send new write index to hardware
69 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_inc_wr_ptr() argument
71 lockdep_assert_held(&txq->lock); in iwl_txq_inc_wr_ptr()
73 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); in iwl_txq_inc_wr_ptr()
76 * if not in power-save mode, uCode will never sleep when we're in iwl_txq_inc_wr_ptr()
79 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); in iwl_txq_inc_wr_ptr()
85 return le16_to_cpu(tfd->num_tbs) & 0x1f; in iwl_txq_gen2_get_num_tbs()
96 * returned from this function - it can only return an error if in iwl_txq_gen2_set_tb()
105 return -EINVAL; in iwl_txq_gen2_set_tb()
106 tb = &tfd->tbs[idx]; in iwl_txq_gen2_set_tb()
109 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { in iwl_txq_gen2_set_tb()
111 trans->txqs.tfd.max_tbs); in iwl_txq_gen2_set_tb()
112 return -EINVAL; in iwl_txq_gen2_set_tb()
115 put_unaligned_le64(addr, &tb->addr); in iwl_txq_gen2_set_tb()
116 tb->tb_len = cpu_to_le16(len); in iwl_txq_gen2_set_tb()
118 tfd->num_tbs = cpu_to_le16(idx + 1); in iwl_txq_gen2_set_tb()
126 tfd->num_tbs = 0; in iwl_txq_set_tfd_invalid_gen2()
128 iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, in iwl_txq_set_tfd_invalid_gen2()
129 trans->invalid_tx_cmd.size); in iwl_txq_set_tfd_invalid_gen2()
140 if (num_tbs > trans->txqs.tfd.max_tbs) { in iwl_txq_gen2_tfd_unmap()
145 /* first TB is never freed - it's the bidirectional DMA data */ in iwl_txq_gen2_tfd_unmap()
147 if (meta->tbs & BIT(i)) in iwl_txq_gen2_tfd_unmap()
148 dma_unmap_page(trans->dev, in iwl_txq_gen2_tfd_unmap()
149 le64_to_cpu(tfd->tbs[i].addr), in iwl_txq_gen2_tfd_unmap()
150 le16_to_cpu(tfd->tbs[i].tb_len), in iwl_txq_gen2_tfd_unmap()
153 dma_unmap_single(trans->dev, in iwl_txq_gen2_tfd_unmap()
154 le64_to_cpu(tfd->tbs[i].addr), in iwl_txq_gen2_tfd_unmap()
155 le16_to_cpu(tfd->tbs[i].tb_len), in iwl_txq_gen2_tfd_unmap()
162 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_gen2_free_tfd() argument
167 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); in iwl_txq_gen2_free_tfd()
170 lockdep_assert_held(&txq->lock); in iwl_txq_gen2_free_tfd()
172 if (!txq->entries) in iwl_txq_gen2_free_tfd()
175 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_gen2_free_tfd()
176 iwl_txq_get_tfd(trans, txq, idx)); in iwl_txq_gen2_free_tfd()
178 skb = txq->entries[idx].skb; in iwl_txq_gen2_free_tfd()
180 /* Can be called from irqs-disabled context in iwl_txq_gen2_free_tfd()
182 * freed and that the queue is not empty - free the skb in iwl_txq_gen2_free_tfd()
185 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_gen2_free_tfd()
186 txq->entries[idx].skb = NULL; in iwl_txq_gen2_free_tfd()
196 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in get_workaround_page()
203 *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; in get_workaround_page()
212 * need to dma_unmap_page() and set the meta->tbs bit in
225 if (unlikely(dma_mapping_error(trans->dev, phys))) in iwl_txq_gen2_set_tb_with_wa()
226 return -ENOMEM; in iwl_txq_gen2_set_tb_with_wa()
235 meta->tbs |= BIT(ret); in iwl_txq_gen2_set_tb_with_wa()
243 * condition above) the TB ends on a 32-bit boundary, in iwl_txq_gen2_set_tb_with_wa()
250 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { in iwl_txq_gen2_set_tb_with_wa()
251 ret = -ENOBUFS; in iwl_txq_gen2_set_tb_with_wa()
257 ret = -ENOMEM; in iwl_txq_gen2_set_tb_with_wa()
263 phys = dma_map_single(trans->dev, page_address(page), len, in iwl_txq_gen2_set_tb_with_wa()
265 if (unlikely(dma_mapping_error(trans->dev, phys))) in iwl_txq_gen2_set_tb_with_wa()
266 return -ENOMEM; in iwl_txq_gen2_set_tb_with_wa()
281 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); in iwl_txq_gen2_set_tb_with_wa()
283 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); in iwl_txq_gen2_set_tb_with_wa()
285 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); in iwl_txq_gen2_set_tb_with_wa()
294 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); in get_page_hdr()
297 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in get_page_hdr()
302 if (!p->page) in get_page_hdr()
309 * page - we need it somewhere, and if it's there then we in get_page_hdr()
311 * trigger the 32-bit boundary hardware bug. in get_page_hdr()
313 * (see also get_workaround_page() in tx-gen2.c) in get_page_hdr()
315 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - in get_page_hdr()
320 __free_page(p->page); in get_page_hdr()
323 p->page = alloc_page(GFP_ATOMIC); in get_page_hdr()
324 if (!p->page) in get_page_hdr()
326 p->pos = page_address(p->page); in get_page_hdr()
328 *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; in get_page_hdr()
330 *page_ptr = p->page; in get_page_hdr()
331 get_page(p->page); in get_page_hdr()
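
get_workaround_page() and get_page_hdr() above keep every page they allocate on a per-skb chain: the previous head is stashed in the last pointer-sized bytes of the new page, and the head itself lives in skb->cb at trans->txqs.page_offs so iwl_txq_free_tso_page() can later walk and free the whole list. A hypothetical, userspace-style sketch of that chaining (names are illustrative only, not driver API):

#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096

/* Allocate a page and link the previous chain head into its tail. */
static void *sketch_chain_page(void **head)
{
        char *page = malloc(SKETCH_PAGE_SIZE);

        if (!page)
                return NULL;
        *(void **)(page + SKETCH_PAGE_SIZE - sizeof(void *)) = *head;
        *head = page;
        return page;
}

/* Walk the chain and free every page, mirroring iwl_txq_free_tso_page(). */
static void sketch_free_chain(void *head)
{
        while (head) {
                void *next = *(void **)((char *)head +
                                        SKETCH_PAGE_SIZE - sizeof(void *));
                free(head);
                head = next;
        }
}
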
343 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; in iwl_txq_gen2_build_amsdu()
344 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_txq_gen2_build_amsdu()
346 unsigned int mss = skb_shinfo(skb)->gso_size; in iwl_txq_gen2_build_amsdu()
352 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), in iwl_txq_gen2_build_amsdu()
353 &dev_cmd->hdr, start_len, 0); in iwl_txq_gen2_build_amsdu()
355 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); in iwl_txq_gen2_build_amsdu()
357 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; in iwl_txq_gen2_build_amsdu()
360 /* total amount of header we may need for this A-MSDU */ in iwl_txq_gen2_build_amsdu()
367 return -ENOMEM; in iwl_txq_gen2_build_amsdu()
369 start_hdr = hdr_page->pos; in iwl_txq_gen2_build_amsdu()
380 * all the different MSDUs inside the A-MSDU. in iwl_txq_gen2_build_amsdu()
382 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); in iwl_txq_gen2_build_amsdu()
391 u8 *subf_hdrs_start = hdr_page->pos; in iwl_txq_gen2_build_amsdu()
393 total_len -= data_left; in iwl_txq_gen2_build_amsdu()
395 memset(hdr_page->pos, 0, amsdu_pad); in iwl_txq_gen2_build_amsdu()
396 hdr_page->pos += amsdu_pad; in iwl_txq_gen2_build_amsdu()
397 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + in iwl_txq_gen2_build_amsdu()
399 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); in iwl_txq_gen2_build_amsdu()
400 hdr_page->pos += ETH_ALEN; in iwl_txq_gen2_build_amsdu()
401 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); in iwl_txq_gen2_build_amsdu()
402 hdr_page->pos += ETH_ALEN; in iwl_txq_gen2_build_amsdu()
405 *((__be16 *)hdr_page->pos) = cpu_to_be16(length); in iwl_txq_gen2_build_amsdu()
406 hdr_page->pos += sizeof(length); in iwl_txq_gen2_build_amsdu()
412 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); in iwl_txq_gen2_build_amsdu()
414 hdr_page->pos += snap_ip_tcp_hdrlen; in iwl_txq_gen2_build_amsdu()
416 tb_len = hdr_page->pos - start_hdr; in iwl_txq_gen2_build_amsdu()
417 tb_phys = dma_map_single(trans->dev, start_hdr, in iwl_txq_gen2_build_amsdu()
419 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_amsdu()
427 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, in iwl_txq_gen2_build_amsdu()
430 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); in iwl_txq_gen2_build_amsdu()
433 start_hdr = hdr_page->pos; in iwl_txq_gen2_build_amsdu()
440 tb_phys = dma_map_single(trans->dev, tso.data, in iwl_txq_gen2_build_amsdu()
448 data_left -= tb_len; in iwl_txq_gen2_build_amsdu()
453 /* re-add the WiFi header */ in iwl_txq_gen2_build_amsdu()
460 return -EINVAL; in iwl_txq_gen2_build_amsdu()
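
The A-MSDU builder above emits, for each subframe, a DA/SA pair, a big-endian length field and the TSO-generated SNAP/IP/TCP header, padding the previous subframe so the next one starts 4-byte aligned. A worked sketch of just the padding rule, assuming the truncated expression in the fragment ends with the usual "& 0x3" mask:

#include <stddef.h>

/* Pad each A-MSDU subframe to a 4-byte boundary.
 * eth_hdr_len stands in for sizeof(struct ethhdr) (14 bytes). */
static unsigned int sketch_amsdu_pad(size_t eth_hdr_len,
                                     size_t snap_ip_tcp_hdrlen,
                                     size_t data_left)
{
        return (4 - (eth_hdr_len + snap_ip_tcp_hdrlen + data_left)) & 0x3;
}
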
465 struct iwl_txq *txq, in iwl_txq_gen2_build_tx_amsdu() argument
472 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx_amsdu()
473 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx_amsdu()
478 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx_amsdu()
482 * to a 64-byte boundary and thus can't be at the end or cross in iwl_txq_gen2_build_tx_amsdu()
489 * and the 802.11 header - dword aligned size in iwl_txq_gen2_build_tx_amsdu()
493 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - in iwl_txq_gen2_build_tx_amsdu()
496 /* do not align A-MSDU to dword as the subframe header aligns it */ in iwl_txq_gen2_build_tx_amsdu()
499 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; in iwl_txq_gen2_build_tx_amsdu()
500 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); in iwl_txq_gen2_build_tx_amsdu()
501 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_tx_amsdu()
513 /* building the A-MSDU might have changed this data, memcpy it now */ in iwl_txq_gen2_build_tx_amsdu()
514 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx_amsdu()
529 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in iwl_txq_gen2_tx_add_frags()
530 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in iwl_txq_gen2_tx_add_frags()
538 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, in iwl_txq_gen2_tx_add_frags()
552 struct iwl_txq *txq, in iwl_txq_gen2_build_tx() argument
560 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx()
561 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx()
567 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx()
569 /* The first TB points to bi-directional DMA data */ in iwl_txq_gen2_build_tx()
570 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx()
574 * to a 64-byte boundary and thus can't be at the end or cross in iwl_txq_gen2_build_tx()
581 * and the 802.11 header - dword aligned size in iwl_txq_gen2_build_tx()
585 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - in iwl_txq_gen2_build_tx()
594 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; in iwl_txq_gen2_build_tx()
595 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); in iwl_txq_gen2_build_tx()
596 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_tx()
603 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, in iwl_txq_gen2_build_tx()
607 tb2_len = skb_headlen(skb) - hdr_len; in iwl_txq_gen2_build_tx()
612 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, in iwl_txq_gen2_build_tx()
615 skb->data + hdr_len, tb2_len, in iwl_txq_gen2_build_tx()
627 tb_phys = dma_map_single(trans->dev, frag->data, in iwl_txq_gen2_build_tx()
630 frag->data, in iwl_txq_gen2_build_tx()
647 struct iwl_txq *txq, in iwl_txq_gen2_build_tfd() argument
652 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in iwl_txq_gen2_build_tfd()
653 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tfd()
654 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tfd()
670 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) in iwl_txq_gen2_build_tfd()
675 amsdu = ieee80211_is_data_qos(hdr->frame_control) && in iwl_txq_gen2_build_tfd()
679 hdr_len = ieee80211_hdrlen(hdr->frame_control); in iwl_txq_gen2_build_tfd()
682 * Only build A-MSDUs here if doing so by GSO, otherwise it may be in iwl_txq_gen2_build_tfd()
683 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been in iwl_txq_gen2_build_tfd()
686 if (amsdu && skb_shinfo(skb)->gso_size) in iwl_txq_gen2_build_tfd()
687 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, in iwl_txq_gen2_build_tfd()
689 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, in iwl_txq_gen2_build_tfd()
701 * If q->n_window is smaller than max_tfd_queue_size, there is no need in iwl_txq_space()
704 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) in iwl_txq_space()
705 max = q->n_window; in iwl_txq_space()
707 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; in iwl_txq_space()
713 used = (q->write_ptr - q->read_ptr) & in iwl_txq_space()
714 (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_space()
719 return max - used; in iwl_txq_space()
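
iwl_txq_space() above computes the number of free slots with a masked subtraction, which works because max_tfd_queue_size is a power of two; capping max one below the ring size keeps "write_ptr == read_ptr" unambiguous as the empty state. A minimal sketch under those assumptions:

#include <stdint.h>

/* Sketch of the ring-space arithmetic shown in the fragments above. */
static uint32_t sketch_txq_space(uint32_t write_ptr, uint32_t read_ptr,
                                 uint32_t n_window, uint32_t max_tfd_queue_size)
{
        uint32_t max, used;

        if (n_window < max_tfd_queue_size)
                max = n_window;
        else
                max = max_tfd_queue_size - 1;   /* keep one slot free */

        used = (write_ptr - read_ptr) & (max_tfd_queue_size - 1);
        return max - used;
}
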
726 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_tx() local
733 return -EINVAL; in iwl_txq_gen2_tx()
735 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), in iwl_txq_gen2_tx()
737 return -EINVAL; in iwl_txq_gen2_tx()
740 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && in iwl_txq_gen2_tx()
742 return -ENOMEM; in iwl_txq_gen2_tx()
744 spin_lock(&txq->lock); in iwl_txq_gen2_tx()
746 if (iwl_txq_space(trans, txq) < txq->high_mark) { in iwl_txq_gen2_tx()
747 iwl_txq_stop(trans, txq); in iwl_txq_gen2_tx()
750 if (unlikely(iwl_txq_space(trans, txq) < 3)) { in iwl_txq_gen2_tx()
753 dev_cmd_ptr = (void *)((u8 *)skb->cb + in iwl_txq_gen2_tx()
754 trans->txqs.dev_cmd_offs); in iwl_txq_gen2_tx()
757 __skb_queue_tail(&txq->overflow_q, skb); in iwl_txq_gen2_tx()
758 spin_unlock(&txq->lock); in iwl_txq_gen2_tx()
763 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_tx()
766 txq->entries[idx].skb = skb; in iwl_txq_gen2_tx()
767 txq->entries[idx].cmd = dev_cmd; in iwl_txq_gen2_tx()
769 dev_cmd->hdr.sequence = in iwl_txq_gen2_tx()
774 out_meta = &txq->entries[idx].meta; in iwl_txq_gen2_tx()
775 out_meta->flags = 0; in iwl_txq_gen2_tx()
777 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); in iwl_txq_gen2_tx()
779 spin_unlock(&txq->lock); in iwl_txq_gen2_tx()
780 return -1; in iwl_txq_gen2_tx()
783 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_txq_gen2_tx()
785 (void *)dev_cmd->payload; in iwl_txq_gen2_tx()
787 cmd_len = le16_to_cpu(tx_cmd_gen3->len); in iwl_txq_gen2_tx()
790 (void *)dev_cmd->payload; in iwl_txq_gen2_tx()
792 cmd_len = le16_to_cpu(tx_cmd_gen2->len); in iwl_txq_gen2_tx()
795 /* Set up entry for this TFD in Tx byte-count array */ in iwl_txq_gen2_tx()
796 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, in iwl_txq_gen2_tx()
800 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) in iwl_txq_gen2_tx()
801 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); in iwl_txq_gen2_tx()
804 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_txq_gen2_tx()
805 iwl_txq_inc_wr_ptr(trans, txq); in iwl_txq_gen2_tx()
810 spin_unlock(&txq->lock); in iwl_txq_gen2_tx()
817 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
821 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_unmap() local
823 spin_lock_bh(&txq->lock); in iwl_txq_gen2_unmap()
824 while (txq->write_ptr != txq->read_ptr) { in iwl_txq_gen2_unmap()
826 txq_id, txq->read_ptr); in iwl_txq_gen2_unmap()
828 if (txq_id != trans->txqs.cmd.q_id) { in iwl_txq_gen2_unmap()
829 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); in iwl_txq_gen2_unmap()
830 struct sk_buff *skb = txq->entries[idx].skb; in iwl_txq_gen2_unmap()
835 iwl_txq_gen2_free_tfd(trans, txq); in iwl_txq_gen2_unmap()
836 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_txq_gen2_unmap()
839 while (!skb_queue_empty(&txq->overflow_q)) { in iwl_txq_gen2_unmap()
840 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); in iwl_txq_gen2_unmap()
842 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_gen2_unmap()
845 spin_unlock_bh(&txq->lock); in iwl_txq_gen2_unmap()
847 /* just in case - this queue may have been stopped */ in iwl_txq_gen2_unmap()
848 iwl_wake_queue(trans, txq); in iwl_txq_gen2_unmap()
852 struct iwl_txq *txq) in iwl_txq_gen2_free_memory() argument
854 struct device *dev = trans->dev; in iwl_txq_gen2_free_memory()
856 /* De-alloc circular buffer of TFDs */ in iwl_txq_gen2_free_memory()
857 if (txq->tfds) { in iwl_txq_gen2_free_memory()
859 trans->txqs.tfd.size * txq->n_window, in iwl_txq_gen2_free_memory()
860 txq->tfds, txq->dma_addr); in iwl_txq_gen2_free_memory()
862 sizeof(*txq->first_tb_bufs) * txq->n_window, in iwl_txq_gen2_free_memory()
863 txq->first_tb_bufs, txq->first_tb_dma); in iwl_txq_gen2_free_memory()
866 kfree(txq->entries); in iwl_txq_gen2_free_memory()
867 if (txq->bc_tbl.addr) in iwl_txq_gen2_free_memory()
868 dma_pool_free(trans->txqs.bc_pool, in iwl_txq_gen2_free_memory()
869 txq->bc_tbl.addr, txq->bc_tbl.dma); in iwl_txq_gen2_free_memory()
870 kfree(txq); in iwl_txq_gen2_free_memory()
874 * iwl_txq_gen2_free - Deallocate DMA queue.
875 * @txq: Transmit queue to deallocate.
879 * 0-fill, but do not free "txq" descriptor structure.
883 struct iwl_txq *txq; in iwl_txq_gen2_free() local
890 txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_free()
892 if (WARN_ON(!txq)) in iwl_txq_gen2_free()
897 /* De-alloc array of command/tx buffers */ in iwl_txq_gen2_free()
898 if (txq_id == trans->txqs.cmd.q_id) in iwl_txq_gen2_free()
899 for (i = 0; i < txq->n_window; i++) { in iwl_txq_gen2_free()
900 kfree_sensitive(txq->entries[i].cmd); in iwl_txq_gen2_free()
901 kfree_sensitive(txq->entries[i].free_buf); in iwl_txq_gen2_free()
903 del_timer_sync(&txq->stuck_timer); in iwl_txq_gen2_free()
905 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_gen2_free()
907 trans->txqs.txq[txq_id] = NULL; in iwl_txq_gen2_free()
909 clear_bit(txq_id, trans->txqs.queue_used); in iwl_txq_gen2_free()
913 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
917 q->n_window = slots_num; in iwl_queue_init()
919 /* slots_num must be power-of-two size, otherwise in iwl_queue_init()
922 return -EINVAL; in iwl_queue_init()
924 q->low_mark = q->n_window / 4; in iwl_queue_init()
925 if (q->low_mark < 4) in iwl_queue_init()
926 q->low_mark = 4; in iwl_queue_init()
928 q->high_mark = q->n_window / 8; in iwl_queue_init()
929 if (q->high_mark < 2) in iwl_queue_init()
930 q->high_mark = 2; in iwl_queue_init()
932 q->write_ptr = 0; in iwl_queue_init()
933 q->read_ptr = 0; in iwl_queue_init()
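
iwl_queue_init() above sizes the flow-control watermarks from n_window; for example, slots_num = 256 yields low_mark = 64 and high_mark = 32, with floors of 4 and 2 for very small queues. A small sketch of that computation:

/* Watermark computation as shown in the fragments above. */
static void sketch_queue_marks(int slots_num, int *low_mark, int *high_mark)
{
        *low_mark = slots_num / 4;
        if (*low_mark < 4)
                *low_mark = 4;

        *high_mark = slots_num / 8;
        if (*high_mark < 2)
                *high_mark = 2;
}
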
938 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_init() argument
943 trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_init()
945 txq->need_update = false; in iwl_txq_init()
947 /* max_tfd_queue_size must be power-of-two size, otherwise in iwl_txq_init()
949 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), in iwl_txq_init()
952 return -EINVAL; in iwl_txq_init()
954 /* Initialize queue's high/low-water marks, and head/tail indexes */ in iwl_txq_init()
955 ret = iwl_queue_init(txq, slots_num); in iwl_txq_init()
959 spin_lock_init(&txq->lock); in iwl_txq_init()
964 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); in iwl_txq_init()
967 __skb_queue_head_init(&txq->overflow_q); in iwl_txq_init()
977 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in iwl_txq_free_tso_page()
984 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE - in iwl_txq_free_tso_page()
990 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_log_scd_error() argument
992 u32 txq_id = txq->id; in iwl_txq_log_scd_error()
997 if (trans->trans_cfg->gen2) { in iwl_txq_log_scd_error()
999 txq->read_ptr, txq->write_ptr); in iwl_txq_log_scd_error()
1011 jiffies_to_msecs(txq->wd_timeout), in iwl_txq_log_scd_error()
1012 txq->read_ptr, txq->write_ptr, in iwl_txq_log_scd_error()
1014 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
1016 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
1022 struct iwl_txq *txq = from_timer(txq, t, stuck_timer); in iwl_txq_stuck_timer() local
1023 struct iwl_trans *trans = txq->trans; in iwl_txq_stuck_timer()
1025 spin_lock(&txq->lock); in iwl_txq_stuck_timer()
1027 if (txq->read_ptr == txq->write_ptr) { in iwl_txq_stuck_timer()
1028 spin_unlock(&txq->lock); in iwl_txq_stuck_timer()
1031 spin_unlock(&txq->lock); in iwl_txq_stuck_timer()
1033 iwl_txq_log_scd_error(trans, txq); in iwl_txq_stuck_timer()
1041 tfd->num_tbs = 0; in iwl_txq_set_tfd_invalid_gen1()
1043 iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma, in iwl_txq_set_tfd_invalid_gen1()
1044 trans->invalid_tx_cmd.size); in iwl_txq_set_tfd_invalid_gen1()
1047 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_alloc() argument
1050 size_t num_entries = trans->trans_cfg->gen2 ? in iwl_txq_alloc()
1051 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_alloc()
1056 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) in iwl_txq_alloc()
1057 return -EINVAL; in iwl_txq_alloc()
1059 if (WARN_ON(txq->entries || txq->tfds)) in iwl_txq_alloc()
1060 return -EINVAL; in iwl_txq_alloc()
1062 tfd_sz = trans->txqs.tfd.size * num_entries; in iwl_txq_alloc()
1064 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); in iwl_txq_alloc()
1065 txq->trans = trans; in iwl_txq_alloc()
1067 txq->n_window = slots_num; in iwl_txq_alloc()
1069 txq->entries = kcalloc(slots_num, in iwl_txq_alloc()
1073 if (!txq->entries) in iwl_txq_alloc()
1078 txq->entries[i].cmd = in iwl_txq_alloc()
1081 if (!txq->entries[i].cmd) in iwl_txq_alloc()
1087 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, in iwl_txq_alloc()
1088 &txq->dma_addr, GFP_KERNEL); in iwl_txq_alloc()
1089 if (!txq->tfds) in iwl_txq_alloc()
1092 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); in iwl_txq_alloc()
1094 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; in iwl_txq_alloc()
1096 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, in iwl_txq_alloc()
1097 &txq->first_tb_dma, in iwl_txq_alloc()
1099 if (!txq->first_tb_bufs) in iwl_txq_alloc()
1103 void *tfd = iwl_txq_get_tfd(trans, txq, i); in iwl_txq_alloc()
1105 if (trans->trans_cfg->gen2) in iwl_txq_alloc()
1113 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); in iwl_txq_alloc()
1114 txq->tfds = NULL; in iwl_txq_alloc()
1116 if (txq->entries && cmd_queue) in iwl_txq_alloc()
1118 kfree(txq->entries[i].cmd); in iwl_txq_alloc()
1119 kfree(txq->entries); in iwl_txq_alloc()
1120 txq->entries = NULL; in iwl_txq_alloc()
1122 return -ENOMEM; in iwl_txq_alloc()
1129 struct iwl_txq *txq; in iwl_txq_dyn_alloc_dma() local
1132 WARN_ON(!trans->txqs.bc_tbl_size); in iwl_txq_dyn_alloc_dma()
1134 bc_tbl_size = trans->txqs.bc_tbl_size; in iwl_txq_dyn_alloc_dma()
1138 return ERR_PTR(-EINVAL); in iwl_txq_dyn_alloc_dma()
1140 txq = kzalloc(sizeof(*txq), GFP_KERNEL); in iwl_txq_dyn_alloc_dma()
1141 if (!txq) in iwl_txq_dyn_alloc_dma()
1142 return ERR_PTR(-ENOMEM); in iwl_txq_dyn_alloc_dma()
1144 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, in iwl_txq_dyn_alloc_dma()
1145 &txq->bc_tbl.dma); in iwl_txq_dyn_alloc_dma()
1146 if (!txq->bc_tbl.addr) { in iwl_txq_dyn_alloc_dma()
1148 kfree(txq); in iwl_txq_dyn_alloc_dma()
1149 return ERR_PTR(-ENOMEM); in iwl_txq_dyn_alloc_dma()
1152 ret = iwl_txq_alloc(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1157 ret = iwl_txq_init(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1163 txq->wd_timeout = msecs_to_jiffies(timeout); in iwl_txq_dyn_alloc_dma()
1165 return txq; in iwl_txq_dyn_alloc_dma()
1168 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc_dma()
1172 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_alloc_response() argument
1179 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != in iwl_txq_alloc_response()
1181 ret = -EINVAL; in iwl_txq_alloc_response()
1185 rsp = (void *)hcmd->resp_pkt->data; in iwl_txq_alloc_response()
1186 qid = le16_to_cpu(rsp->queue_number); in iwl_txq_alloc_response()
1187 wr_ptr = le16_to_cpu(rsp->write_pointer); in iwl_txq_alloc_response()
1189 if (qid >= ARRAY_SIZE(trans->txqs.txq)) { in iwl_txq_alloc_response()
1191 ret = -EIO; in iwl_txq_alloc_response()
1195 if (test_and_set_bit(qid, trans->txqs.queue_used)) { in iwl_txq_alloc_response()
1197 ret = -EIO; in iwl_txq_alloc_response()
1201 if (WARN_ONCE(trans->txqs.txq[qid], in iwl_txq_alloc_response()
1203 ret = -EIO; in iwl_txq_alloc_response()
1207 txq->id = qid; in iwl_txq_alloc_response()
1208 trans->txqs.txq[qid] = txq; in iwl_txq_alloc_response()
1209 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_alloc_response()
1212 txq->read_ptr = wr_ptr; in iwl_txq_alloc_response()
1213 txq->write_ptr = wr_ptr; in iwl_txq_alloc_response()
1222 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_alloc_response()
1229 struct iwl_txq *txq; in iwl_txq_dyn_alloc() local
1239 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && in iwl_txq_dyn_alloc()
1240 trans->hw_rev_step == SILICON_A_STEP) in iwl_txq_dyn_alloc()
1243 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); in iwl_txq_dyn_alloc()
1244 if (IS_ERR(txq)) in iwl_txq_dyn_alloc()
1245 return PTR_ERR(txq); in iwl_txq_dyn_alloc()
1247 if (trans->txqs.queue_alloc_cmd_ver == 0) { in iwl_txq_dyn_alloc()
1249 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); in iwl_txq_dyn_alloc()
1250 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); in iwl_txq_dyn_alloc()
1256 ret = -EINVAL; in iwl_txq_dyn_alloc()
1259 cmd.old.sta_id = ffs(sta_mask) - 1; in iwl_txq_dyn_alloc()
1264 } else if (trans->txqs.queue_alloc_cmd_ver == 3) { in iwl_txq_dyn_alloc()
1267 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); in iwl_txq_dyn_alloc()
1268 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); in iwl_txq_dyn_alloc()
1278 ret = -EOPNOTSUPP; in iwl_txq_dyn_alloc()
1286 return iwl_txq_alloc_response(trans, txq, &hcmd); in iwl_txq_dyn_alloc()
1289 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc()
1300 * Upon HW Rfkill - we stop the device, and then stop the queues in iwl_txq_dyn_free()
1305 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { in iwl_txq_dyn_free()
1306 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), in iwl_txq_dyn_free()
1320 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); in iwl_txq_gen2_tx_free()
1323 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { in iwl_txq_gen2_tx_free()
1324 if (!trans->txqs.txq[i]) in iwl_txq_gen2_tx_free()
1337 if (!trans->txqs.txq[txq_id]) { in iwl_txq_gen2_init()
1341 return -ENOMEM; in iwl_txq_gen2_init()
1343 trans->txqs.txq[txq_id] = queue; in iwl_txq_gen2_init()
1350 queue = trans->txqs.txq[txq_id]; in iwl_txq_gen2_init()
1354 (txq_id == trans->txqs.cmd.q_id)); in iwl_txq_gen2_init()
1359 trans->txqs.txq[txq_id]->id = txq_id; in iwl_txq_gen2_init()
1360 set_bit(txq_id, trans->txqs.queue_used); in iwl_txq_gen2_init()
1372 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; in iwl_txq_gen1_tfd_tb_get_addr()
1376 addr = get_unaligned_le32(&tb->lo); in iwl_txq_gen1_tfd_tb_get_addr()
1381 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; in iwl_txq_gen1_tfd_tb_get_addr()
1384 * shift by 16 twice to avoid warnings on 32-bit in iwl_txq_gen1_tfd_tb_get_addr()
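
iwl_txq_gen1_tfd_tb_get_addr() above rebuilds a 36-bit DMA address from the 32-bit lo word plus the low nibble of hi_n_len; shifting by 16 twice rather than by 32 avoids "shift count >= width" warnings when dma_addr_t is only 32 bits. A standalone sketch of that reconstruction:

#include <stdint.h>

/* Reassemble the 36-bit address from the gen1 TB fields (sketch). */
static uint64_t sketch_tb_get_addr(uint32_t lo, uint16_t hi_n_len)
{
        uint64_t addr = lo;
        uint64_t hi_len = hi_n_len & 0xF;

        /* shift by 16 twice to stay warning-free on 32-bit builds */
        return addr | ((hi_len << 16) << 16);
}
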
1393 struct iwl_txq *txq, int index) in iwl_txq_gen1_tfd_unmap() argument
1396 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); in iwl_txq_gen1_tfd_unmap()
1401 if (num_tbs > trans->txqs.tfd.max_tbs) { in iwl_txq_gen1_tfd_unmap()
1407 /* first TB is never freed - it's the bidirectional DMA data */ in iwl_txq_gen1_tfd_unmap()
1410 if (meta->tbs & BIT(i)) in iwl_txq_gen1_tfd_unmap()
1411 dma_unmap_page(trans->dev, in iwl_txq_gen1_tfd_unmap()
1418 dma_unmap_single(trans->dev, in iwl_txq_gen1_tfd_unmap()
1426 meta->tbs = 0; in iwl_txq_gen1_tfd_unmap()
1435 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1438 struct iwl_txq *txq, u16 byte_cnt, in iwl_txq_gen1_update_byte_cnt_tbl() argument
1442 int write_ptr = txq->write_ptr; in iwl_txq_gen1_update_byte_cnt_tbl()
1443 int txq_id = txq->id; in iwl_txq_gen1_update_byte_cnt_tbl()
1447 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; in iwl_txq_gen1_update_byte_cnt_tbl()
1448 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; in iwl_txq_gen1_update_byte_cnt_tbl()
1449 u8 sta_id = tx_cmd->sta_id; in iwl_txq_gen1_update_byte_cnt_tbl()
1451 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_update_byte_cnt_tbl()
1453 sec_ctl = tx_cmd->sec_ctl; in iwl_txq_gen1_update_byte_cnt_tbl()
1466 if (trans->txqs.bc_table_dword) in iwl_txq_gen1_update_byte_cnt_tbl()
1482 struct iwl_txq *txq) in iwl_txq_gen1_inval_byte_cnt_tbl() argument
1484 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_inval_byte_cnt_tbl()
1485 int txq_id = txq->id; in iwl_txq_gen1_inval_byte_cnt_tbl()
1486 int read_ptr = txq->read_ptr; in iwl_txq_gen1_inval_byte_cnt_tbl()
1489 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; in iwl_txq_gen1_inval_byte_cnt_tbl()
1490 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; in iwl_txq_gen1_inval_byte_cnt_tbl()
1494 if (txq_id != trans->txqs.cmd.q_id) in iwl_txq_gen1_inval_byte_cnt_tbl()
1495 sta_id = tx_cmd->sta_id; in iwl_txq_gen1_inval_byte_cnt_tbl()
1507 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1508 * @trans: transport private data in iwl_txq_free_tfd()
1509 * @txq: tx queue in iwl_txq_free_tfd()
1510 * @dma_dir: the direction of the DMA mapping in iwl_txq_free_tfd()
1515 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_free_tfd() argument
1520 int rd_ptr = txq->read_ptr; in iwl_txq_free_tfd()
1521 int idx = iwl_txq_get_cmd_index(txq, rd_ptr); in iwl_txq_free_tfd()
1524 lockdep_assert_held(&txq->lock); in iwl_txq_free_tfd()
1526 if (!txq->entries) in iwl_txq_free_tfd()
1529 /* We have only q->n_window txq->entries, but we use in iwl_txq_free_tfd()
1532 if (trans->trans_cfg->gen2) in iwl_txq_free_tfd()
1533 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
1534 iwl_txq_get_tfd(trans, txq, rd_ptr)); in iwl_txq_free_tfd()
1536 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
1537 txq, rd_ptr); in iwl_txq_free_tfd()
1540 skb = txq->entries[idx].skb; in iwl_txq_free_tfd()
1542 /* Can be called from irqs-disabled context in iwl_txq_free_tfd()
1544 * freed and that the queue is not empty - free the skb in iwl_txq_free_tfd()
1547 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_free_tfd()
1548 txq->entries[idx].skb = NULL; in iwl_txq_free_tfd()
1552 void iwl_txq_progress(struct iwl_txq *txq) in iwl_txq_progress() argument
1554 lockdep_assert_held(&txq->lock); in iwl_txq_progress()
1556 if (!txq->wd_timeout) in iwl_txq_progress()
1560 * station is asleep and we send data - that must in iwl_txq_progress()
1561 * be uAPSD or PS-Poll. Don't rearm the timer. in iwl_txq_progress()
1563 if (txq->frozen) in iwl_txq_progress()
1570 if (txq->read_ptr == txq->write_ptr) in iwl_txq_progress()
1571 del_timer(&txq->stuck_timer); in iwl_txq_progress()
1573 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); in iwl_txq_progress()
1580 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_reclaim() local
1584 if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) in iwl_txq_reclaim()
1587 if (WARN_ON(!txq)) in iwl_txq_reclaim()
1590 tfd_num = iwl_txq_get_cmd_index(txq, ssn); in iwl_txq_reclaim()
1592 spin_lock_bh(&txq->lock); in iwl_txq_reclaim()
1593 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); in iwl_txq_reclaim()
1595 if (!test_bit(txq_id, trans->txqs.queue_used)) { in iwl_txq_reclaim()
1596 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", in iwl_txq_reclaim()
1604 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", in iwl_txq_reclaim()
1605 txq_id, txq->read_ptr, tfd_num, ssn); in iwl_txq_reclaim()
1611 if (!iwl_txq_used(txq, last_to_free)) { in iwl_txq_reclaim()
1613 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", in iwl_txq_reclaim()
1615 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_txq_reclaim()
1616 txq->write_ptr, txq->read_ptr); in iwl_txq_reclaim()
1618 iwl_op_mode_time_point(trans->op_mode, in iwl_txq_reclaim()
1629 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), in iwl_txq_reclaim()
1630 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { in iwl_txq_reclaim()
1631 struct sk_buff *skb = txq->entries[read_ptr].skb; in iwl_txq_reclaim()
1640 txq->entries[read_ptr].skb = NULL; in iwl_txq_reclaim()
1642 if (!trans->trans_cfg->gen2) in iwl_txq_reclaim()
1643 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); in iwl_txq_reclaim()
1645 iwl_txq_free_tfd(trans, txq); in iwl_txq_reclaim()
1648 iwl_txq_progress(txq); in iwl_txq_reclaim()
1650 if (iwl_txq_space(trans, txq) > txq->low_mark && in iwl_txq_reclaim()
1651 test_bit(txq_id, trans->txqs.queue_stopped)) { in iwl_txq_reclaim()
1656 skb_queue_splice_init(&txq->overflow_q, in iwl_txq_reclaim()
1663 * the state of &txq->overflow_q, as we just emptied it, but in iwl_txq_reclaim()
1666 txq->overflow_tx = true; in iwl_txq_reclaim()
1670 * re-entrant, so no one will try to access the in iwl_txq_reclaim()
1671 * txq data from that path. We stopped tx, so we can't in iwl_txq_reclaim()
1672 * have tx as well. Bottom line, we can unlock and re-lock in iwl_txq_reclaim()
1675 spin_unlock_bh(&txq->lock); in iwl_txq_reclaim()
1680 dev_cmd_ptr = *(void **)((u8 *)skb->cb + in iwl_txq_reclaim()
1681 trans->txqs.dev_cmd_offs); in iwl_txq_reclaim()
1691 if (iwl_txq_space(trans, txq) > txq->low_mark) in iwl_txq_reclaim()
1692 iwl_wake_queue(trans, txq); in iwl_txq_reclaim()
1694 spin_lock_bh(&txq->lock); in iwl_txq_reclaim()
1695 txq->overflow_tx = false; in iwl_txq_reclaim()
1699 spin_unlock_bh(&txq->lock); in iwl_txq_reclaim()
1702 /* Set wr_ptr of specific device and txq */
1705 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_set_q_ptrs() local
1707 spin_lock_bh(&txq->lock); in iwl_txq_set_q_ptrs()
1709 txq->write_ptr = ptr; in iwl_txq_set_q_ptrs()
1710 txq->read_ptr = txq->write_ptr; in iwl_txq_set_q_ptrs()
1712 spin_unlock_bh(&txq->lock); in iwl_txq_set_q_ptrs()
1721 struct iwl_txq *txq = trans->txqs.txq[queue]; in iwl_trans_txq_freeze_timer() local
1724 spin_lock_bh(&txq->lock); in iwl_trans_txq_freeze_timer()
1728 if (txq->frozen == freeze) in iwl_trans_txq_freeze_timer()
1731 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", in iwl_trans_txq_freeze_timer()
1734 txq->frozen = freeze; in iwl_trans_txq_freeze_timer()
1736 if (txq->read_ptr == txq->write_ptr) in iwl_trans_txq_freeze_timer()
1741 txq->stuck_timer.expires))) { in iwl_trans_txq_freeze_timer()
1749 txq->frozen_expiry_remainder = in iwl_trans_txq_freeze_timer()
1750 txq->stuck_timer.expires - now; in iwl_trans_txq_freeze_timer()
1751 del_timer(&txq->stuck_timer); in iwl_trans_txq_freeze_timer()
1756 * Wake a non-empty queue -> arm timer with the in iwl_trans_txq_freeze_timer()
1759 mod_timer(&txq->stuck_timer, in iwl_trans_txq_freeze_timer()
1760 now + txq->frozen_expiry_remainder); in iwl_trans_txq_freeze_timer()
1763 spin_unlock_bh(&txq->lock); in iwl_trans_txq_freeze_timer()
1772 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); in iwl_trans_txq_send_hcmd_sync()
1773 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; in iwl_trans_txq_send_hcmd_sync() local
1780 &trans->status), in iwl_trans_txq_send_hcmd_sync()
1782 return -EIO; in iwl_trans_txq_send_hcmd_sync()
1786 cmd_idx = trans->ops->send_cmd(trans, cmd); in iwl_trans_txq_send_hcmd_sync()
1789 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_txq_send_hcmd_sync()
1795 ret = wait_event_timeout(trans->wait_command_queue, in iwl_trans_txq_send_hcmd_sync()
1797 &trans->status), in iwl_trans_txq_send_hcmd_sync()
1804 txq->read_ptr, txq->write_ptr); in iwl_trans_txq_send_hcmd_sync()
1806 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_txq_send_hcmd_sync()
1809 ret = -ETIMEDOUT; in iwl_trans_txq_send_hcmd_sync()
1815 if (test_bit(STATUS_FW_ERROR, &trans->status)) { in iwl_trans_txq_send_hcmd_sync()
1817 &trans->status)) { in iwl_trans_txq_send_hcmd_sync()
1821 ret = -EIO; in iwl_trans_txq_send_hcmd_sync()
1825 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && in iwl_trans_txq_send_hcmd_sync()
1826 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_txq_send_hcmd_sync()
1828 ret = -ERFKILL; in iwl_trans_txq_send_hcmd_sync()
1832 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { in iwl_trans_txq_send_hcmd_sync()
1834 ret = -EIO; in iwl_trans_txq_send_hcmd_sync()
1841 if (cmd->flags & CMD_WANT_SKB) { in iwl_trans_txq_send_hcmd_sync()
1846 * address (cmd->meta.source). in iwl_trans_txq_send_hcmd_sync()
1848 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; in iwl_trans_txq_send_hcmd_sync()
1851 if (cmd->resp_pkt) { in iwl_trans_txq_send_hcmd_sync()
1853 cmd->resp_pkt = NULL; in iwl_trans_txq_send_hcmd_sync()
1863 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_trans_txq_send_hcmd()
1864 return -ENODEV; in iwl_trans_txq_send_hcmd()
1866 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && in iwl_trans_txq_send_hcmd()
1867 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_txq_send_hcmd()
1869 cmd->id); in iwl_trans_txq_send_hcmd()
1870 return -ERFKILL; in iwl_trans_txq_send_hcmd()
1873 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && in iwl_trans_txq_send_hcmd()
1874 !(cmd->flags & CMD_SEND_IN_D3))) { in iwl_trans_txq_send_hcmd()
1875 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); in iwl_trans_txq_send_hcmd()
1876 return -EHOSTDOWN; in iwl_trans_txq_send_hcmd()
1879 if (cmd->flags & CMD_ASYNC) { in iwl_trans_txq_send_hcmd()
1883 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) in iwl_trans_txq_send_hcmd()
1884 return -EINVAL; in iwl_trans_txq_send_hcmd()
1886 ret = trans->ops->send_cmd(trans, cmd); in iwl_trans_txq_send_hcmd()
1890 iwl_get_cmd_string(trans, cmd->id), ret); in iwl_trans_txq_send_hcmd()