// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/


int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
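	 * If the NIC looked asleep we already returned above after setting
	 * need_update; the deferred write is then done from
	 * iwl_pcie_txq_check_wrptrs() once the NIC has interrupted us again.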
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			spin_lock(&trans_pcie->reg_lock);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock(&trans_pcie->reg_lock);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

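		/*
		 * Frames on the overflow queue were only queued by the
		 * driver and never handed to the device, so just give
		 * them back to the op_mode to free.
		 */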
		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to avoid
	 * having invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];
		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
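	 * (The op_mode may still call txq_disable() for these queues later;
	 * that path tolerates queues that were already torn down here.)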
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
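	 * In that case the error path below (iwl_pcie_tx_free()) takes care
	 * of releasing whatever is still allocated.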
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_bh(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
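		 * The register holds the address shifted right by 8 bits,
		 * so the TFD ring must be 256-byte aligned.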
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
						   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
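 *
 * For the command queue we expect to reclaim exactly one entry per
 * completion; reclaiming more than one here means host commands were
 * lost, which is reported below and escalated with an NMI.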
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		/* BHs are also disabled due to txq->lock */
		spin_lock(&trans_pcie->reg_lock);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock(&trans_pcie->reg_lock);
	}

	iwl_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
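	 * In that case the queue_used bit is already clear, so just return
	 * below (and only warn if the device is still marked as enabled).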
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
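	 * Note that copy_size only covers data that is copied into the
	 * pre-allocated command buffer; NOCOPY/DUP chunks are mapped in
	 * their own TBs further down.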
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock(&trans_pcie->reg_lock);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		goto unlock_reg;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

unlock_reg:
	spin_unlock(&trans_pcie->reg_lock);
out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to the queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

#ifdef CONFIG_INET
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
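		 * tso_build_hdr() writes the SNAP/IP/TCP headers for this
		 * subframe right after the DA/SA/length fields built above.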
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
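	 * In that case the frame is simply mapped like any other frame by
	 * iwl_fill_data_tbs() in the else branch below.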
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}