// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "queue/tx.h"

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));
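	/*
	 * An ASYNC command requires two free slots while a SYNC command
	 * only needs one, so the last slot is effectively held in
	 * reserve; presumably this is so that asynchronous callers can
	 * never completely fill the queue and starve a synchronous
	 * command.
	 */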
	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
	iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
			    tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   (u8 *)out_cmd + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
				    copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
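		/*
		 * Attach the chunk to the TFD at its mapped address: for
		 * DUP chunks this points into dup_buf, for NOCOPY chunks
		 * directly into the caller's buffer.
		 */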
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock(&trans_pcie->reg_lock);
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	spin_unlock(&trans_pcie->reg_lock);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
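/*
 * Usage sketch (illustrative only, not part of the driver): op-mode code
 * does not call iwl_pcie_gen2_enqueue_hcmd() directly; it is reached
 * through the transport's send-command path. Assuming a wide-group
 * command id built with WIDE_ID(), a caller would look roughly like:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(LONG_GROUP, ECHO_CMD),
 *		.flags = CMD_ASYNC,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 * A chunk too large (or unsuitable) for the copied first buffer can be
 * passed by reference instead, which takes the nocopy mapping path above:
 *
 *	hcmd.data[0] = big_buf;
 *	hcmd.len[0] = big_buf_len;
 *	hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
 */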