// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
 * rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/


int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

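/*
 * iwl_pcie_txq_check_wrptrs - apply any deferred write-pointer updates
 *
 * Walk all used Tx queues and, for each queue that was flagged with
 * need_update (because the NIC first had to be woken up), write the
 * pending write pointer to the hardware now.
 */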
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BDs.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

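/*
 * iwl_trans_pcie_tx_reset - re-point the hardware at the existing Tx rings
 *
 * After a device reset (e.g. when resuming from WoWLAN), rewrite the
 * circular-buffer base addresses and the keep-warm buffer address, then
 * restart the scheduler via iwl_pcie_tx_start().
 */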
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to avoid
	 * invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];
		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/*
	 * It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail.
	 */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

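/*
 * iwl_pcie_tx_init - initialize the Tx path for a (re)started device
 *
 * Allocates the Tx context on the first call, then programs the keep-warm
 * buffer address and the per-queue circular buffer base addresses into the
 * FH, and enables the scheduler's auto-active mode.
 */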
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

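/*
 * On NICs that need the apmg_wake_up_wa workaround, grab MAC access before
 * a host command is handed to the firmware and keep the NIC awake
 * (cmd_hold_nic_awake) until iwl_pcie_clear_cmd_in_flight() releases it.
 */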
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
						   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances the 'R' index, all entries between the old and new 'R'
 * index need to be reclaimed. As a result, some free space forms. If there
 * is enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	unsigned long flags;
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
		      SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

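/*
 * Configure a Tx queue in the scheduler: set its RA/TID mapping and
 * aggregation state, program the read/write pointers to ssn, and set the
 * window size / frame limit. Returns true when the pre-9000 SCD write
 * pointer bug was worked around (ssn was bumped), so the op_mode must
 * take the adjusted ssn into account.
 */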
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
				      trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
				      SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
				      SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

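/*
 * Host command submission helpers: the async variant only enqueues the
 * command, while the sync variant enqueues it and then sleeps on
 * wait_command_queue until iwl_pcie_hcmd_complete() clears
 * STATUS_SYNC_HCMD_ACTIVE or HOST_COMPLETE_TIMEOUT expires.
 */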
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_trans_pcie_dump_regs(trans);
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

#ifdef CONFIG_INET
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

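/*
 * Build the TFD for a regular data frame: TB0 holds the first
 * IWL_FIRST_TB_SIZE bytes of the Tx command (bi-directional DMA), TB1 holds
 * the rest of the Tx command plus the 802.11 header, and the remaining TBs
 * point at the frame payload (or at the A-MSDU subframes for TSO).
 */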
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}