// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'Tx done' IRQ), once the free space
 * rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
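
/*
 * A purely illustrative sketch of the empty/full bookkeeping described above
 * (the real arithmetic lives in the iwl_txq_space()/iwl_txq_inc_wrap()
 * helpers used throughout this file): with a power-of-two ring of q_size
 * entries, "read == write" means empty, so a completely full ring would look
 * identical to an empty one. Keeping at least two entries unused removes the
 * ambiguity, roughly:
 *
 *	used = (write_ptr - read_ptr) & (q_size - 1);
 *	free = q_size - used - 2;
 */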

int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}
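
/*
 * A worked example of the legacy TFD transfer-buffer encoding set up above,
 * assuming iwl_get_dma_hi_addr() returns the DMA address bits above bit 31:
 * for addr = 0x312345678 and len = 40,
 *
 *	tb->lo       <- 0x12345678 (little endian)
 *	tb->hi_n_len <- (40 << 4) | 0x3 = 0x283
 *
 * i.e. the 16-bit hi_n_len field carries the buffer length in bits 4..15 and
 * the high DMA address bits in bits 0..3.
 */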

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			spin_lock(&trans_pcie->reg_lock);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock(&trans_pcie->reg_lock);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
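
/*
 * A note on the shifted base addresses programmed by iwl_pcie_tx_start()
 * below (and by iwl_pcie_tx_init()/iwl_trans_pcie_tx_reset()): several
 * firmware-facing registers take their base address in units larger than a
 * byte, which is why the driver writes "dma >> 10" for the byte-count tables
 * (SCD_DRAM_BASE_ADDR), "dma >> 4" for the keep-warm buffer
 * (FH_KW_MEM_ADDR_REG) and "dma_addr >> 8" for each queue's TFD ring
 * (FH_MEM_CBBC_QUEUE). The coherent allocations backing them are at least
 * page aligned, so the bits dropped by the shifts are zero.
 */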

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];
		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}
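
/*
 * iwl_pcie_tx_init() below is the (re)initialization counterpart of
 * iwl_pcie_tx_alloc() above: the DMA memory is allocated only once, when
 * trans_pcie->txq_memory is still NULL, while the hardware programming
 * (scheduler state, keep-warm address, per-queue TFD ring base registers)
 * is redone on every call, e.g. across a firmware restart. On failure the
 * context is freed only if this call was the one that allocated it.
 */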

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_bh(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
						   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}
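
/*
 * iwl_pcie_set_cmd_in_flight() above pairs with
 * iwl_pcie_clear_cmd_in_flight(): on devices with apmg_wake_up_wa set, the
 * first host command queued asserts CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ so
 * the firmware cannot go back to sleep while commands are outstanding, and
 * the bit is only released once the command queue drains (read_ptr catches
 * up with write_ptr) in iwl_pcie_cmdq_reclaim() or iwl_pcie_txq_unmap().
 */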

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		/* BHs are also disabled due to txq->lock */
		spin_lock(&trans_pcie->reg_lock);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock(&trans_pcie->reg_lock);
	}

	iwl_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
		      SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}
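
/*
 * In the scheduler's translation table each 32-bit word holds the RA/TID
 * mapping of two adjacent queues: an even txq_id lands in the low halfword
 * and an odd one in the high halfword, which is what the read-modify-write
 * in iwl_pcie_txq_set_ratid_map() above preserves. The ra_tid value itself
 * is built with BUILD_RAxTID() below; for example, station index 5 with
 * TID 2 encodes as (5 << 4) + 2 = 0x52.
 */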

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
				      trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
				      SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
				      SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock(&trans_pcie->reg_lock);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		goto unlock_reg;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

unlock_reg:
	spin_unlock(&trans_pcie->reg_lock);
out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
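
/*
 * The sequence field written by iwl_pcie_enqueue_hcmd() above is what ties a
 * firmware response back to its originating command: the response header
 * carries the same value, and iwl_pcie_hcmd_complete() below recovers the
 * queue and ring index from it. Conceptually:
 *
 *	seq    = QUEUE_TO_SEQ(cmd_q_id) | INDEX_TO_SEQ(write_ptr);  (enqueue)
 *	txq_id = SEQ_TO_QUEUE(seq);                                 (complete)
 *	index  = SEQ_TO_INDEX(seq);
 */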

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}
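
/*
 * Rough layout of each A-MSDU subframe that iwl_fill_data_tbs_amsdu() below
 * builds in the header page obtained from get_page_hdr() (the SNAP/IP/TCP
 * portion is generated by the TSO core via tso_build_hdr()):
 *
 *	| pad to 4 bytes | DA (6) | SA (6) | length (2, BE) | SNAP/IP/TCP |
 *
 * The subframe payload itself is then mapped straight from the skb in
 * chunks of up to mss bytes.
 */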

#ifdef CONFIG_INET
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */
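
/*
 * A rough picture of how iwl_trans_pcie_tx() below spreads a frame over the
 * TFD's transfer buffers:
 *
 *	TB0:  first IWL_FIRST_TB_SIZE bytes of the device command, copied
 *	      into txq->first_tb_bufs (bi-directional; the scratch pointers
 *	      in the Tx command point into it)
 *	TB1:  remainder of the Tx command plus the 802.11 header, sized to a
 *	      dword boundary unless the frame is an A-MSDU
 *	TB2+: the frame payload (linear head remainder and page fragments),
 *	      or the generated A-MSDU subframe headers and payload chunks
 */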

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station
		 * wakes up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}