/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

/* page used for building TSO headers; 'pos' is the current write position */
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

/* map a ring index into the [0, n_window) window; n_window is a power of 2 */
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->gen2)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32 bits - since the
 * hardware is always 64-bit, the 4 GiB crossing can still occur in that
 * case, so use u64 for 'phys' here to force the addition to be done in
 * 64 bits.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
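
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * how a caller might use iwl_txq_crosses_4g_boundary() before programming
 * a fragment as a single TB.  For instance, phys = 0xfffff000 with
 * len = 0x2000 ends at 0x100001000, so upper_32_bits() of the start (0x0)
 * and of the end (0x1) differ and the check returns true.
 */
static inline bool iwl_txq_example_tb_is_dma_safe(dma_addr_t addr, u16 len)
{
	/* illustrative assumption: a single TB must not straddle 4 GiB */
	return !iwl_txq_crosses_4g_boundary(addr, len);
}
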
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/* return whether entry @i lies between the read and write pointers (in use) */
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
		      u32 sta_mask, u8 tid,
		      int size, unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif

/* number of TBs used in a gen1 TFD (kept in the low 5 bits of num_tbs) */
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/* return the length of TB @idx, handling both the gen1 and gen2 TFD layouts */
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->gen2) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*
 * Fill TB @idx of a gen1 TFD: the low 32 bits of the DMA address go into
 * 'lo', while 'hi_n_len' packs address bits 32-35 in its low nibble and
 * the 12-bit length in its upper bits.
 */
static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans,
					    struct iwl_tfd *tfd,
					    u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs, bool is_flush);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_queue_tx_h__ */