// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. The driver and device exchange the status
 * of each queue via "read" and "write" pointers. The driver keeps a minimum
 * of 2 empty entries in each circular buffer, to protect against confusing
 * empty and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low-mark and high-mark limits. If, after
 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
 * becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/


int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. this is not the command queue (for CMDs the NIC is woken up
	 *    outside this function, regardless of shadow registers)
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up the NIC if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
109 */ 110 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); 111 if (!txq->block) 112 iwl_write32(trans, HBUS_TARG_WRPTR, 113 txq->write_ptr | (txq_id << 8)); 114 } 115 116 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 117 { 118 int i; 119 120 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 121 struct iwl_txq *txq = trans->txqs.txq[i]; 122 123 if (!test_bit(i, trans->txqs.queue_used)) 124 continue; 125 126 spin_lock_bh(&txq->lock); 127 if (txq->need_update) { 128 iwl_pcie_txq_inc_wr_ptr(trans, txq); 129 txq->need_update = false; 130 } 131 spin_unlock_bh(&txq->lock); 132 } 133 } 134 135 static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, 136 u8 idx, dma_addr_t addr, u16 len) 137 { 138 struct iwl_tfd *tfd_fh = (void *)tfd; 139 struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; 140 141 u16 hi_n_len = len << 4; 142 143 put_unaligned_le32(addr, &tb->lo); 144 hi_n_len |= iwl_get_dma_hi_addr(addr); 145 146 tb->hi_n_len = cpu_to_le16(hi_n_len); 147 148 tfd_fh->num_tbs = idx + 1; 149 } 150 151 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 152 dma_addr_t addr, u16 len, bool reset) 153 { 154 void *tfd; 155 u32 num_tbs; 156 157 tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr; 158 159 if (reset) 160 memset(tfd, 0, trans->txqs.tfd.size); 161 162 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); 163 164 /* Each TFD can point to a maximum max_tbs Tx buffers */ 165 if (num_tbs >= trans->txqs.tfd.max_tbs) { 166 IWL_ERR(trans, "Error can not send more than %d chunks\n", 167 trans->txqs.tfd.max_tbs); 168 return -EINVAL; 169 } 170 171 if (WARN(addr & ~IWL_TX_DMA_MASK, 172 "Unaligned address = %llx\n", (unsigned long long)addr)) 173 return -EINVAL; 174 175 iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); 176 177 return num_tbs; 178 } 179 180 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 181 { 182 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 183 184 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) 185 return; 186 187 spin_lock(&trans_pcie->reg_lock); 188 189 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) { 190 spin_unlock(&trans_pcie->reg_lock); 191 return; 192 } 193 194 trans_pcie->cmd_hold_nic_awake = false; 195 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 196 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 197 spin_unlock(&trans_pcie->reg_lock); 198 } 199 200 /* 201 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 202 */ 203 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 204 { 205 struct iwl_txq *txq = trans->txqs.txq[txq_id]; 206 207 if (!txq) { 208 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); 209 return; 210 } 211 212 spin_lock_bh(&txq->lock); 213 while (txq->write_ptr != txq->read_ptr) { 214 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 215 txq_id, txq->read_ptr); 216 217 if (txq_id != trans->txqs.cmd.q_id) { 218 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 219 220 if (WARN_ON_ONCE(!skb)) 221 continue; 222 223 iwl_txq_free_tso_page(trans, skb); 224 } 225 iwl_txq_free_tfd(trans, txq); 226 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); 227 228 if (txq->read_ptr == txq->write_ptr && 229 txq_id == trans->txqs.cmd.q_id) 230 iwl_pcie_clear_cmd_in_flight(trans); 231 } 232 233 while (!skb_queue_empty(&txq->overflow_q)) { 234 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 235 236 iwl_op_mode_free_skb(trans->op_mode, skb); 237 } 238 239 spin_unlock_bh(&txq->lock); 240 241 /* just 
in case - this queue may have been stopped */ 242 iwl_wake_queue(trans, txq); 243 } 244 245 /* 246 * iwl_pcie_txq_free - Deallocate DMA queue. 247 * @txq: Transmit queue to deallocate. 248 * 249 * Empty queue by removing and destroying all BD's. 250 * Free all buffers. 251 * 0-fill, but do not free "txq" descriptor structure. 252 */ 253 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 254 { 255 struct iwl_txq *txq = trans->txqs.txq[txq_id]; 256 struct device *dev = trans->dev; 257 int i; 258 259 if (WARN_ON(!txq)) 260 return; 261 262 iwl_pcie_txq_unmap(trans, txq_id); 263 264 /* De-alloc array of command/tx buffers */ 265 if (txq_id == trans->txqs.cmd.q_id) 266 for (i = 0; i < txq->n_window; i++) { 267 kfree_sensitive(txq->entries[i].cmd); 268 kfree_sensitive(txq->entries[i].free_buf); 269 } 270 271 /* De-alloc circular buffer of TFDs */ 272 if (txq->tfds) { 273 dma_free_coherent(dev, 274 trans->txqs.tfd.size * 275 trans->trans_cfg->base_params->max_tfd_queue_size, 276 txq->tfds, txq->dma_addr); 277 txq->dma_addr = 0; 278 txq->tfds = NULL; 279 280 dma_free_coherent(dev, 281 sizeof(*txq->first_tb_bufs) * txq->n_window, 282 txq->first_tb_bufs, txq->first_tb_dma); 283 } 284 285 kfree(txq->entries); 286 txq->entries = NULL; 287 288 del_timer_sync(&txq->stuck_timer); 289 290 /* 0-fill queue descriptor structure */ 291 memset(txq, 0, sizeof(*txq)); 292 } 293 294 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 295 { 296 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 297 int nq = trans->trans_cfg->base_params->num_of_queues; 298 int chan; 299 u32 reg_val; 300 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 301 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 302 303 /* make sure all queue are not stopped/used */ 304 memset(trans->txqs.queue_stopped, 0, 305 sizeof(trans->txqs.queue_stopped)); 306 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 307 308 trans_pcie->scd_base_addr = 309 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 310 311 WARN_ON(scd_base_addr != 0 && 312 scd_base_addr != trans_pcie->scd_base_addr); 313 314 /* reset context data, TX status and translation data */ 315 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 316 SCD_CONTEXT_MEM_LOWER_BOUND, 317 NULL, clear_dwords); 318 319 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 320 trans->txqs.scd_bc_tbls.dma >> 10); 321 322 /* The chain extension of the SCD doesn't work well. This feature is 323 * enabled by default by the HW, so we need to disable it manually. 
324 */ 325 if (trans->trans_cfg->base_params->scd_chain_ext_wa) 326 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 327 328 iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, 329 trans->txqs.cmd.fifo, 330 trans->txqs.cmd.wdg_timeout); 331 332 /* Activate all Tx DMA/FIFO channels */ 333 iwl_scd_activate_fifos(trans); 334 335 /* Enable DMA channel */ 336 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 337 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 338 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 339 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 340 341 /* Update FH chicken bits */ 342 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 343 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 344 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 345 346 /* Enable L1-Active */ 347 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) 348 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 349 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 350 } 351 352 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 353 { 354 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 355 int txq_id; 356 357 /* 358 * we should never get here in gen2 trans mode return early to avoid 359 * having invalid accesses 360 */ 361 if (WARN_ON_ONCE(trans->trans_cfg->gen2)) 362 return; 363 364 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 365 txq_id++) { 366 struct iwl_txq *txq = trans->txqs.txq[txq_id]; 367 if (trans->trans_cfg->use_tfh) 368 iwl_write_direct64(trans, 369 FH_MEM_CBBC_QUEUE(trans, txq_id), 370 txq->dma_addr); 371 else 372 iwl_write_direct32(trans, 373 FH_MEM_CBBC_QUEUE(trans, txq_id), 374 txq->dma_addr >> 8); 375 iwl_pcie_txq_unmap(trans, txq_id); 376 txq->read_ptr = 0; 377 txq->write_ptr = 0; 378 } 379 380 /* Tell NIC where to find the "keep warm" buffer */ 381 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 382 trans_pcie->kw.dma >> 4); 383 384 /* 385 * Send 0 as the scd_base_addr since the device may have be reset 386 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 387 * contain garbage. 388 */ 389 iwl_pcie_tx_start(trans, 0); 390 } 391 392 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 393 { 394 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 395 int ch, ret; 396 u32 mask = 0; 397 398 spin_lock_bh(&trans_pcie->irq_lock); 399 400 if (!iwl_trans_grab_nic_access(trans)) 401 goto out; 402 403 /* Stop each Tx DMA channel */ 404 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 405 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 406 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 407 } 408 409 /* Wait for DMA channels to be idle */ 410 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 411 if (ret < 0) 412 IWL_ERR(trans, 413 "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 414 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 415 416 iwl_trans_release_nic_access(trans); 417 418 out: 419 spin_unlock_bh(&trans_pcie->irq_lock); 420 } 421 422 /* 423 * iwl_pcie_tx_stop - Stop all Tx DMA channels 424 */ 425 int iwl_pcie_tx_stop(struct iwl_trans *trans) 426 { 427 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 428 int txq_id; 429 430 /* Turn off all Tx DMA fifos */ 431 iwl_scd_deactivate_fifos(trans); 432 433 /* Turn off all Tx DMA channels */ 434 iwl_pcie_tx_stop_fh(trans); 435 436 /* 437 * This function can be called before the op_mode disabled the 438 * queues. This happens when we have an rfkill interrupt. 
439 * Since we stop Tx altogether - mark the queues as stopped. 440 */ 441 memset(trans->txqs.queue_stopped, 0, 442 sizeof(trans->txqs.queue_stopped)); 443 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 444 445 /* This can happen: start_hw, stop_device */ 446 if (!trans_pcie->txq_memory) 447 return 0; 448 449 /* Unmap DMA from host system and free skb's */ 450 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 451 txq_id++) 452 iwl_pcie_txq_unmap(trans, txq_id); 453 454 return 0; 455 } 456 457 /* 458 * iwl_trans_tx_free - Free TXQ Context 459 * 460 * Destroy all TX DMA queues and structures 461 */ 462 void iwl_pcie_tx_free(struct iwl_trans *trans) 463 { 464 int txq_id; 465 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 466 467 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 468 469 /* Tx queues */ 470 if (trans_pcie->txq_memory) { 471 for (txq_id = 0; 472 txq_id < trans->trans_cfg->base_params->num_of_queues; 473 txq_id++) { 474 iwl_pcie_txq_free(trans, txq_id); 475 trans->txqs.txq[txq_id] = NULL; 476 } 477 } 478 479 kfree(trans_pcie->txq_memory); 480 trans_pcie->txq_memory = NULL; 481 482 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 483 484 iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); 485 } 486 487 /* 488 * iwl_pcie_tx_alloc - allocate TX context 489 * Allocate all Tx DMA structures and initialize them 490 */ 491 static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 492 { 493 int ret; 494 int txq_id, slots_num; 495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 496 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; 497 498 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) 499 return -EINVAL; 500 501 bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); 502 503 /*It is not allowed to alloc twice, so warn when this happens. 
504 * We cannot rely on the previous allocation, so free and fail */ 505 if (WARN_ON(trans_pcie->txq_memory)) { 506 ret = -EINVAL; 507 goto error; 508 } 509 510 ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, 511 bc_tbls_size); 512 if (ret) { 513 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 514 goto error; 515 } 516 517 /* Alloc keep-warm buffer */ 518 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 519 if (ret) { 520 IWL_ERR(trans, "Keep Warm allocation failed\n"); 521 goto error; 522 } 523 524 trans_pcie->txq_memory = 525 kcalloc(trans->trans_cfg->base_params->num_of_queues, 526 sizeof(struct iwl_txq), GFP_KERNEL); 527 if (!trans_pcie->txq_memory) { 528 IWL_ERR(trans, "Not enough memory for txq\n"); 529 ret = -ENOMEM; 530 goto error; 531 } 532 533 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 534 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 535 txq_id++) { 536 bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); 537 538 if (cmd_queue) 539 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 540 trans->cfg->min_txq_size); 541 else 542 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 543 trans->cfg->min_256_ba_txq_size); 544 trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; 545 ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, 546 cmd_queue); 547 if (ret) { 548 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 549 goto error; 550 } 551 trans->txqs.txq[txq_id]->id = txq_id; 552 } 553 554 return 0; 555 556 error: 557 iwl_pcie_tx_free(trans); 558 559 return ret; 560 } 561 562 int iwl_pcie_tx_init(struct iwl_trans *trans) 563 { 564 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 565 int ret; 566 int txq_id, slots_num; 567 bool alloc = false; 568 569 if (!trans_pcie->txq_memory) { 570 ret = iwl_pcie_tx_alloc(trans); 571 if (ret) 572 goto error; 573 alloc = true; 574 } 575 576 spin_lock_bh(&trans_pcie->irq_lock); 577 578 /* Turn off all Tx DMA fifos */ 579 iwl_scd_deactivate_fifos(trans); 580 581 /* Tell NIC where to find the "keep warm" buffer */ 582 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 583 trans_pcie->kw.dma >> 4); 584 585 spin_unlock_bh(&trans_pcie->irq_lock); 586 587 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 588 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 589 txq_id++) { 590 bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); 591 592 if (cmd_queue) 593 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 594 trans->cfg->min_txq_size); 595 else 596 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 597 trans->cfg->min_256_ba_txq_size); 598 ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, 599 cmd_queue); 600 if (ret) { 601 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 602 goto error; 603 } 604 605 /* 606 * Tell nic where to find circular buffer of TFDs for a 607 * given Tx queue, and enable the DMA channel used for that 608 * queue. 
609 * Circular buffer (TFD queue in DRAM) physical base address 610 */ 611 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), 612 trans->txqs.txq[txq_id]->dma_addr >> 8); 613 } 614 615 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); 616 if (trans->trans_cfg->base_params->num_of_queues > 20) 617 iwl_set_bits_prph(trans, SCD_GP_CTRL, 618 SCD_GP_CTRL_ENABLE_31_QUEUES); 619 620 return 0; 621 error: 622 /*Upon error, free only if we allocated something */ 623 if (alloc) 624 iwl_pcie_tx_free(trans); 625 return ret; 626 } 627 628 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, 629 const struct iwl_host_cmd *cmd) 630 { 631 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 632 int ret = 0; 633 634 /* Make sure the NIC is still alive in the bus */ 635 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 636 return -ENODEV; 637 638 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) 639 return 0; 640 641 spin_lock(&trans_pcie->reg_lock); 642 /* 643 * wake up the NIC to make sure that the firmware will see the host 644 * command - we will let the NIC sleep once all the host commands 645 * returned. This needs to be done only on NICs that have 646 * apmg_wake_up_wa set (see above.) 647 */ 648 if (!trans_pcie->cmd_hold_nic_awake) { 649 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 650 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 651 652 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 653 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 654 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 655 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 656 15000); 657 if (ret < 0) { 658 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 659 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 660 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 661 ret = -EIO; 662 } else { 663 trans_pcie->cmd_hold_nic_awake = true; 664 } 665 } 666 spin_unlock(&trans_pcie->reg_lock); 667 668 return ret; 669 } 670 671 /* 672 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 673 * 674 * When FW advances 'R' index, all entries between old and new 'R' index 675 * need to be reclaimed. As result, some free space forms. If there is 676 * enough free space (> low mark), wake the stack that feeds us. 
677 */ 678 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 679 { 680 struct iwl_txq *txq = trans->txqs.txq[txq_id]; 681 int nfreed = 0; 682 u16 r; 683 684 lockdep_assert_held(&txq->lock); 685 686 idx = iwl_txq_get_cmd_index(txq, idx); 687 r = iwl_txq_get_cmd_index(txq, txq->read_ptr); 688 689 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || 690 (!iwl_txq_used(txq, idx))) { 691 WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), 692 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 693 __func__, txq_id, idx, 694 trans->trans_cfg->base_params->max_tfd_queue_size, 695 txq->write_ptr, txq->read_ptr); 696 return; 697 } 698 699 for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; 700 r = iwl_txq_inc_wrap(trans, r)) { 701 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); 702 703 if (nfreed++ > 0) { 704 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 705 idx, txq->write_ptr, r); 706 iwl_force_nmi(trans); 707 } 708 } 709 710 if (txq->read_ptr == txq->write_ptr) 711 iwl_pcie_clear_cmd_in_flight(trans); 712 713 iwl_txq_progress(txq); 714 } 715 716 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 717 u16 txq_id) 718 { 719 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 720 u32 tbl_dw_addr; 721 u32 tbl_dw; 722 u16 scd_q2ratid; 723 724 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 725 726 tbl_dw_addr = trans_pcie->scd_base_addr + 727 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 728 729 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 730 731 if (txq_id & 0x1) 732 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 733 else 734 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 735 736 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 737 738 return 0; 739 } 740 741 /* Receiver address (actually, Rx station's index into station table), 742 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 743 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 744 745 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 746 const struct iwl_trans_txq_scd_cfg *cfg, 747 unsigned int wdg_timeout) 748 { 749 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 750 struct iwl_txq *txq = trans->txqs.txq[txq_id]; 751 int fifo = -1; 752 bool scd_bug = false; 753 754 if (test_and_set_bit(txq_id, trans->txqs.queue_used)) 755 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 756 757 txq->wd_timeout = msecs_to_jiffies(wdg_timeout); 758 759 if (cfg) { 760 fifo = cfg->fifo; 761 762 /* Disable the scheduler prior configuring the cmd queue */ 763 if (txq_id == trans->txqs.cmd.q_id && 764 trans_pcie->scd_set_active) 765 iwl_scd_enable_set_active(trans, 0); 766 767 /* Stop this Tx queue before configuring it */ 768 iwl_scd_txq_set_inactive(trans, txq_id); 769 770 /* Set this queue as a chain-building queue unless it is CMD */ 771 if (txq_id != trans->txqs.cmd.q_id) 772 iwl_scd_txq_set_chain(trans, txq_id); 773 774 if (cfg->aggregate) { 775 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); 776 777 /* Map receiver-address / traffic-ID to this queue */ 778 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); 779 780 /* enable aggregations for the queue */ 781 iwl_scd_txq_enable_agg(trans, txq_id); 782 txq->ampdu = true; 783 } else { 784 /* 785 * disable aggregations for the queue, this will also 786 * make the ra_tid mapping configuration irrelevant 787 * since it is now a non-AGG queue. 
788 */ 789 iwl_scd_txq_disable_agg(trans, txq_id); 790 791 ssn = txq->read_ptr; 792 } 793 } else { 794 /* 795 * If we need to move the SCD write pointer by steps of 796 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let 797 * the op_mode know by returning true later. 798 * Do this only in case cfg is NULL since this trick can 799 * be done only if we have DQA enabled which is true for mvm 800 * only. And mvm never sets a cfg pointer. 801 * This is really ugly, but this is the easiest way out for 802 * this sad hardware issue. 803 * This bug has been fixed on devices 9000 and up. 804 */ 805 scd_bug = !trans->trans_cfg->mq_rx_supported && 806 !((ssn - txq->write_ptr) & 0x3f) && 807 (ssn != txq->write_ptr); 808 if (scd_bug) 809 ssn++; 810 } 811 812 /* Place first TFD at index corresponding to start sequence number. 813 * Assumes that ssn_idx is valid (!= 0xFFF) */ 814 txq->read_ptr = (ssn & 0xff); 815 txq->write_ptr = (ssn & 0xff); 816 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 817 (ssn & 0xff) | (txq_id << 8)); 818 819 if (cfg) { 820 u8 frame_limit = cfg->frame_limit; 821 822 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 823 824 /* Set up Tx window size and frame limit for this queue */ 825 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 826 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 827 iwl_trans_write_mem32(trans, 828 trans_pcie->scd_base_addr + 829 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 830 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | 831 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); 832 833 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 834 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 835 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 836 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 837 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 838 SCD_QUEUE_STTS_REG_MSK); 839 840 /* enable the scheduler for this queue (only) */ 841 if (txq_id == trans->txqs.cmd.q_id && 842 trans_pcie->scd_set_active) 843 iwl_scd_enable_set_active(trans, BIT(txq_id)); 844 845 IWL_DEBUG_TX_QUEUES(trans, 846 "Activate queue %d on FIFO %d WrPtr: %d\n", 847 txq_id, fifo, ssn & 0xff); 848 } else { 849 IWL_DEBUG_TX_QUEUES(trans, 850 "Activate queue %d WrPtr: %d\n", 851 txq_id, ssn & 0xff); 852 } 853 854 return scd_bug; 855 } 856 857 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, 858 bool shared_mode) 859 { 860 struct iwl_txq *txq = trans->txqs.txq[txq_id]; 861 862 txq->ampdu = !shared_mode; 863 } 864 865 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 866 bool configure_scd) 867 { 868 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 869 u32 stts_addr = trans_pcie->scd_base_addr + 870 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 871 static const u32 zero_val[4] = {}; 872 873 trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; 874 trans->txqs.txq[txq_id]->frozen = false; 875 876 /* 877 * Upon HW Rfkill - we stop the device, and then stop the queues 878 * in the op_mode. Just for the sake of the simplicity of the op_mode, 879 * allow the op_mode to call txq_disable after it already called 880 * stop_device. 
881 */ 882 if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { 883 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 884 "queue %d not used", txq_id); 885 return; 886 } 887 888 if (configure_scd) { 889 iwl_scd_txq_set_inactive(trans, txq_id); 890 891 iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, 892 ARRAY_SIZE(zero_val)); 893 } 894 895 iwl_pcie_txq_unmap(trans, txq_id); 896 trans->txqs.txq[txq_id]->ampdu = false; 897 898 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 899 } 900 901 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 902 903 /* 904 * iwl_pcie_enqueue_hcmd - enqueue a uCode command 905 * @priv: device private data point 906 * @cmd: a pointer to the ucode command structure 907 * 908 * The function returns < 0 values to indicate the operation 909 * failed. On success, it returns the index (>= 0) of command in the 910 * command queue. 911 */ 912 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 913 struct iwl_host_cmd *cmd) 914 { 915 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; 916 struct iwl_device_cmd *out_cmd; 917 struct iwl_cmd_meta *out_meta; 918 void *dup_buf = NULL; 919 dma_addr_t phys_addr; 920 int idx; 921 u16 copy_size, cmd_size, tb0_size; 922 bool had_nocopy = false; 923 u8 group_id = iwl_cmd_groupid(cmd->id); 924 int i, ret; 925 u32 cmd_pos; 926 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 927 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 928 unsigned long flags; 929 930 if (WARN(!trans->wide_cmd_header && 931 group_id > IWL_ALWAYS_LONG_GROUP, 932 "unsupported wide command %#x\n", cmd->id)) 933 return -EINVAL; 934 935 if (group_id != 0) { 936 copy_size = sizeof(struct iwl_cmd_header_wide); 937 cmd_size = sizeof(struct iwl_cmd_header_wide); 938 } else { 939 copy_size = sizeof(struct iwl_cmd_header); 940 cmd_size = sizeof(struct iwl_cmd_header); 941 } 942 943 /* need one for the header if the first is NOCOPY */ 944 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 945 946 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 947 cmddata[i] = cmd->data[i]; 948 cmdlen[i] = cmd->len[i]; 949 950 if (!cmd->len[i]) 951 continue; 952 953 /* need at least IWL_FIRST_TB_SIZE copied */ 954 if (copy_size < IWL_FIRST_TB_SIZE) { 955 int copy = IWL_FIRST_TB_SIZE - copy_size; 956 957 if (copy > cmdlen[i]) 958 copy = cmdlen[i]; 959 cmdlen[i] -= copy; 960 cmddata[i] += copy; 961 copy_size += copy; 962 } 963 964 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 965 had_nocopy = true; 966 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 967 idx = -EINVAL; 968 goto free_dup_buf; 969 } 970 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 971 /* 972 * This is also a chunk that isn't copied 973 * to the static buffer so set had_nocopy. 974 */ 975 had_nocopy = true; 976 977 /* only allowed once */ 978 if (WARN_ON(dup_buf)) { 979 idx = -EINVAL; 980 goto free_dup_buf; 981 } 982 983 dup_buf = kmemdup(cmddata[i], cmdlen[i], 984 GFP_ATOMIC); 985 if (!dup_buf) 986 return -ENOMEM; 987 } else { 988 /* NOCOPY must not be followed by normal! */ 989 if (WARN_ON(had_nocopy)) { 990 idx = -EINVAL; 991 goto free_dup_buf; 992 } 993 copy_size += cmdlen[i]; 994 } 995 cmd_size += cmd->len[i]; 996 } 997 998 /* 999 * If any of the command structures end up being larger than 1000 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1001 * allocated into separate TFDs, then we will need to 1002 * increase the size of the buffers. 
1003 */ 1004 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1005 "Command %s (%#x) is too large (%d bytes)\n", 1006 iwl_get_cmd_string(trans, cmd->id), 1007 cmd->id, copy_size)) { 1008 idx = -EINVAL; 1009 goto free_dup_buf; 1010 } 1011 1012 spin_lock_irqsave(&txq->lock, flags); 1013 1014 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1015 spin_unlock_irqrestore(&txq->lock, flags); 1016 1017 IWL_ERR(trans, "No space in command queue\n"); 1018 iwl_op_mode_cmd_queue_full(trans->op_mode); 1019 idx = -ENOSPC; 1020 goto free_dup_buf; 1021 } 1022 1023 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 1024 out_cmd = txq->entries[idx].cmd; 1025 out_meta = &txq->entries[idx].meta; 1026 1027 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1028 if (cmd->flags & CMD_WANT_SKB) 1029 out_meta->source = cmd; 1030 1031 /* set up the header */ 1032 if (group_id != 0) { 1033 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1034 out_cmd->hdr_wide.group_id = group_id; 1035 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1036 out_cmd->hdr_wide.length = 1037 cpu_to_le16(cmd_size - 1038 sizeof(struct iwl_cmd_header_wide)); 1039 out_cmd->hdr_wide.reserved = 0; 1040 out_cmd->hdr_wide.sequence = 1041 cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | 1042 INDEX_TO_SEQ(txq->write_ptr)); 1043 1044 cmd_pos = sizeof(struct iwl_cmd_header_wide); 1045 copy_size = sizeof(struct iwl_cmd_header_wide); 1046 } else { 1047 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1048 out_cmd->hdr.sequence = 1049 cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | 1050 INDEX_TO_SEQ(txq->write_ptr)); 1051 out_cmd->hdr.group_id = 0; 1052 1053 cmd_pos = sizeof(struct iwl_cmd_header); 1054 copy_size = sizeof(struct iwl_cmd_header); 1055 } 1056 1057 /* and copy the data that needs to be copied */ 1058 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1059 int copy; 1060 1061 if (!cmd->len[i]) 1062 continue; 1063 1064 /* copy everything if not nocopy/dup */ 1065 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1066 IWL_HCMD_DFL_DUP))) { 1067 copy = cmd->len[i]; 1068 1069 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1070 cmd_pos += copy; 1071 copy_size += copy; 1072 continue; 1073 } 1074 1075 /* 1076 * Otherwise we need at least IWL_FIRST_TB_SIZE copied 1077 * in total (for bi-directional DMA), but copy up to what 1078 * we can fit into the payload for debug dump purposes. 
1079 */ 1080 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1081 1082 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1083 cmd_pos += copy; 1084 1085 /* However, treat copy_size the proper way, we need it below */ 1086 if (copy_size < IWL_FIRST_TB_SIZE) { 1087 copy = IWL_FIRST_TB_SIZE - copy_size; 1088 1089 if (copy > cmd->len[i]) 1090 copy = cmd->len[i]; 1091 copy_size += copy; 1092 } 1093 } 1094 1095 IWL_DEBUG_HC(trans, 1096 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1097 iwl_get_cmd_string(trans, cmd->id), 1098 group_id, out_cmd->hdr.cmd, 1099 le16_to_cpu(out_cmd->hdr.sequence), 1100 cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); 1101 1102 /* start the TFD with the minimum copy bytes */ 1103 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 1104 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1105 iwl_pcie_txq_build_tfd(trans, txq, 1106 iwl_txq_get_first_tb_dma(txq, idx), 1107 tb0_size, true); 1108 1109 /* map first command fragment, if any remains */ 1110 if (copy_size > tb0_size) { 1111 phys_addr = dma_map_single(trans->dev, 1112 ((u8 *)&out_cmd->hdr) + tb0_size, 1113 copy_size - tb0_size, 1114 DMA_TO_DEVICE); 1115 if (dma_mapping_error(trans->dev, phys_addr)) { 1116 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, 1117 txq->write_ptr); 1118 idx = -ENOMEM; 1119 goto out; 1120 } 1121 1122 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1123 copy_size - tb0_size, false); 1124 } 1125 1126 /* map the remaining (adjusted) nocopy/dup fragments */ 1127 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1128 const void *data = cmddata[i]; 1129 1130 if (!cmdlen[i]) 1131 continue; 1132 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1133 IWL_HCMD_DFL_DUP))) 1134 continue; 1135 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1136 data = dup_buf; 1137 phys_addr = dma_map_single(trans->dev, (void *)data, 1138 cmdlen[i], DMA_TO_DEVICE); 1139 if (dma_mapping_error(trans->dev, phys_addr)) { 1140 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, 1141 txq->write_ptr); 1142 idx = -ENOMEM; 1143 goto out; 1144 } 1145 1146 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1147 } 1148 1149 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); 1150 out_meta->flags = cmd->flags; 1151 if (WARN_ON_ONCE(txq->entries[idx].free_buf)) 1152 kfree_sensitive(txq->entries[idx].free_buf); 1153 txq->entries[idx].free_buf = dup_buf; 1154 1155 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); 1156 1157 /* start timer if queue currently empty */ 1158 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 1159 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1160 1161 ret = iwl_pcie_set_cmd_in_flight(trans, cmd); 1162 if (ret < 0) { 1163 idx = ret; 1164 goto out; 1165 } 1166 1167 /* Increment and update queue's write index */ 1168 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); 1169 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1170 1171 out: 1172 spin_unlock_irqrestore(&txq->lock, flags); 1173 free_dup_buf: 1174 if (idx < 0) 1175 kfree(dup_buf); 1176 return idx; 1177 } 1178 1179 /* 1180 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1181 * @rxb: Rx buffer to reclaim 1182 */ 1183 void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 1184 struct iwl_rx_cmd_buffer *rxb) 1185 { 1186 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1187 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1188 u8 group_id; 1189 u32 cmd_id; 1190 int txq_id = SEQ_TO_QUEUE(sequence); 1191 int index = 
SEQ_TO_INDEX(sequence); 1192 int cmd_index; 1193 struct iwl_device_cmd *cmd; 1194 struct iwl_cmd_meta *meta; 1195 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1196 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; 1197 1198 /* If a Tx command is being handled and it isn't in the actual 1199 * command queue then there a command routing bug has been introduced 1200 * in the queue management code. */ 1201 if (WARN(txq_id != trans->txqs.cmd.q_id, 1202 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 1203 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, 1204 txq->write_ptr)) { 1205 iwl_print_hex_error(trans, pkt, 32); 1206 return; 1207 } 1208 1209 spin_lock_bh(&txq->lock); 1210 1211 cmd_index = iwl_txq_get_cmd_index(txq, index); 1212 cmd = txq->entries[cmd_index].cmd; 1213 meta = &txq->entries[cmd_index].meta; 1214 group_id = cmd->hdr.group_id; 1215 cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); 1216 1217 iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); 1218 1219 /* Input error checking is done when commands are added to queue. */ 1220 if (meta->flags & CMD_WANT_SKB) { 1221 struct page *p = rxb_steal_page(rxb); 1222 1223 meta->source->resp_pkt = pkt; 1224 meta->source->_rx_page_addr = (unsigned long)page_address(p); 1225 meta->source->_rx_page_order = trans_pcie->rx_page_order; 1226 } 1227 1228 if (meta->flags & CMD_WANT_ASYNC_CALLBACK) 1229 iwl_op_mode_async_cb(trans->op_mode, cmd); 1230 1231 iwl_pcie_cmdq_reclaim(trans, txq_id, index); 1232 1233 if (!(meta->flags & CMD_ASYNC)) { 1234 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { 1235 IWL_WARN(trans, 1236 "HCMD_ACTIVE already clear for command %s\n", 1237 iwl_get_cmd_string(trans, cmd_id)); 1238 } 1239 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1240 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1241 iwl_get_cmd_string(trans, cmd_id)); 1242 wake_up(&trans->wait_command_queue); 1243 } 1244 1245 meta->flags = 0; 1246 1247 spin_unlock_bh(&txq->lock); 1248 } 1249 1250 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, 1251 struct iwl_txq *txq, u8 hdr_len, 1252 struct iwl_cmd_meta *out_meta) 1253 { 1254 u16 head_tb_len; 1255 int i; 1256 1257 /* 1258 * Set up TFD's third entry to point directly to remainder 1259 * of skb's head, if any 1260 */ 1261 head_tb_len = skb_headlen(skb) - hdr_len; 1262 1263 if (head_tb_len > 0) { 1264 dma_addr_t tb_phys = dma_map_single(trans->dev, 1265 skb->data + hdr_len, 1266 head_tb_len, DMA_TO_DEVICE); 1267 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 1268 return -EINVAL; 1269 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, 1270 tb_phys, head_tb_len); 1271 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); 1272 } 1273 1274 /* set up the remaining entries to point to the data */ 1275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1276 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1277 dma_addr_t tb_phys; 1278 int tb_idx; 1279 1280 if (!skb_frag_size(frag)) 1281 continue; 1282 1283 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 1284 skb_frag_size(frag), DMA_TO_DEVICE); 1285 1286 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 1287 return -EINVAL; 1288 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), 1289 tb_phys, skb_frag_size(frag)); 1290 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 1291 skb_frag_size(frag), false); 1292 if (tb_idx < 0) 1293 return tb_idx; 1294 1295 out_meta->tbs |= BIT(tb_idx); 1296 } 1297 1298 
return 0; 1299 } 1300 1301 #ifdef CONFIG_INET 1302 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 1303 struct iwl_txq *txq, u8 hdr_len, 1304 struct iwl_cmd_meta *out_meta, 1305 struct iwl_device_tx_cmd *dev_cmd, 1306 u16 tb1_len) 1307 { 1308 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 1309 struct ieee80211_hdr *hdr = (void *)skb->data; 1310 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 1311 unsigned int mss = skb_shinfo(skb)->gso_size; 1312 u16 length, iv_len, amsdu_pad; 1313 u8 *start_hdr; 1314 struct iwl_tso_hdr_page *hdr_page; 1315 struct tso_t tso; 1316 1317 /* if the packet is protected, then it must be CCMP or GCMP */ 1318 BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); 1319 iv_len = ieee80211_has_protected(hdr->frame_control) ? 1320 IEEE80211_CCMP_HDR_LEN : 0; 1321 1322 trace_iwlwifi_dev_tx(trans->dev, skb, 1323 iwl_txq_get_tfd(trans, txq, txq->write_ptr), 1324 trans->txqs.tfd.size, 1325 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 1326 1327 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 1328 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 1329 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; 1330 amsdu_pad = 0; 1331 1332 /* total amount of header we may need for this A-MSDU */ 1333 hdr_room = DIV_ROUND_UP(total_len, mss) * 1334 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; 1335 1336 /* Our device supports 9 segments at most, it will fit in 1 page */ 1337 hdr_page = get_page_hdr(trans, hdr_room, skb); 1338 if (!hdr_page) 1339 return -ENOMEM; 1340 1341 start_hdr = hdr_page->pos; 1342 memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); 1343 hdr_page->pos += iv_len; 1344 1345 /* 1346 * Pull the ieee80211 header + IV to be able to use TSO core, 1347 * we will restore it for the tx_status flow. 1348 */ 1349 skb_pull(skb, hdr_len + iv_len); 1350 1351 /* 1352 * Remove the length of all the headers that we don't actually 1353 * have in the MPDU by themselves, but that we duplicate into 1354 * all the different MSDUs inside the A-MSDU. 1355 */ 1356 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 1357 1358 tso_start(skb, &tso); 1359 1360 while (total_len) { 1361 /* this is the data left for this subframe */ 1362 unsigned int data_left = 1363 min_t(unsigned int, mss, total_len); 1364 struct sk_buff *csum_skb = NULL; 1365 unsigned int hdr_tb_len; 1366 dma_addr_t hdr_tb_phys; 1367 u8 *subf_hdrs_start = hdr_page->pos; 1368 1369 total_len -= data_left; 1370 1371 memset(hdr_page->pos, 0, amsdu_pad); 1372 hdr_page->pos += amsdu_pad; 1373 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 1374 data_left)) & 0x3; 1375 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 1376 hdr_page->pos += ETH_ALEN; 1377 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 1378 hdr_page->pos += ETH_ALEN; 1379 1380 length = snap_ip_tcp_hdrlen + data_left; 1381 *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 1382 hdr_page->pos += sizeof(length); 1383 1384 /* 1385 * This will copy the SNAP as well which will be considered 1386 * as MAC header. 
1387 */ 1388 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 1389 1390 hdr_page->pos += snap_ip_tcp_hdrlen; 1391 1392 hdr_tb_len = hdr_page->pos - start_hdr; 1393 hdr_tb_phys = dma_map_single(trans->dev, start_hdr, 1394 hdr_tb_len, DMA_TO_DEVICE); 1395 if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { 1396 dev_kfree_skb(csum_skb); 1397 return -EINVAL; 1398 } 1399 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 1400 hdr_tb_len, false); 1401 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 1402 hdr_tb_phys, hdr_tb_len); 1403 /* add this subframe's headers' length to the tx_cmd */ 1404 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 1405 1406 /* prepare the start_hdr for the next subframe */ 1407 start_hdr = hdr_page->pos; 1408 1409 /* put the payload */ 1410 while (data_left) { 1411 unsigned int size = min_t(unsigned int, tso.size, 1412 data_left); 1413 dma_addr_t tb_phys; 1414 1415 tb_phys = dma_map_single(trans->dev, tso.data, 1416 size, DMA_TO_DEVICE); 1417 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 1418 dev_kfree_skb(csum_skb); 1419 return -EINVAL; 1420 } 1421 1422 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 1423 size, false); 1424 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, 1425 tb_phys, size); 1426 1427 data_left -= size; 1428 tso_build_data(skb, &tso, size); 1429 } 1430 } 1431 1432 /* re -add the WiFi header and IV */ 1433 skb_push(skb, hdr_len + iv_len); 1434 1435 return 0; 1436 } 1437 #else /* CONFIG_INET */ 1438 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 1439 struct iwl_txq *txq, u8 hdr_len, 1440 struct iwl_cmd_meta *out_meta, 1441 struct iwl_device_tx_cmd *dev_cmd, 1442 u16 tb1_len) 1443 { 1444 /* No A-MSDU without CONFIG_INET */ 1445 WARN_ON(1); 1446 1447 return -1; 1448 } 1449 #endif /* CONFIG_INET */ 1450 1451 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1452 struct iwl_device_tx_cmd *dev_cmd, int txq_id) 1453 { 1454 struct ieee80211_hdr *hdr; 1455 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 1456 struct iwl_cmd_meta *out_meta; 1457 struct iwl_txq *txq; 1458 dma_addr_t tb0_phys, tb1_phys, scratch_phys; 1459 void *tb1_addr; 1460 void *tfd; 1461 u16 len, tb1_len; 1462 bool wait_write_ptr; 1463 __le16 fc; 1464 u8 hdr_len; 1465 u16 wifi_seq; 1466 bool amsdu; 1467 1468 txq = trans->txqs.txq[txq_id]; 1469 1470 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), 1471 "TX on unused queue %d\n", txq_id)) 1472 return -EINVAL; 1473 1474 if (skb_is_nonlinear(skb) && 1475 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && 1476 __skb_linearize(skb)) 1477 return -ENOMEM; 1478 1479 /* mac80211 always puts the full header into the SKB's head, 1480 * so there's no need to check if it's readable there 1481 */ 1482 hdr = (struct ieee80211_hdr *)skb->data; 1483 fc = hdr->frame_control; 1484 hdr_len = ieee80211_hdrlen(fc); 1485 1486 spin_lock(&txq->lock); 1487 1488 if (iwl_txq_space(trans, txq) < txq->high_mark) { 1489 iwl_txq_stop(trans, txq); 1490 1491 /* don't put the packet on the ring, if there is no room */ 1492 if (unlikely(iwl_txq_space(trans, txq) < 3)) { 1493 struct iwl_device_tx_cmd **dev_cmd_ptr; 1494 1495 dev_cmd_ptr = (void *)((u8 *)skb->cb + 1496 trans->txqs.dev_cmd_offs); 1497 1498 *dev_cmd_ptr = dev_cmd; 1499 __skb_queue_tail(&txq->overflow_q, skb); 1500 1501 spin_unlock(&txq->lock); 1502 return 0; 1503 } 1504 } 1505 1506 /* In AGG mode, the index in the ring must correspond to the WiFi 1507 * sequence number. 
This is a HW requirements to help the SCD to parse 1508 * the BA. 1509 * Check here that the packets are in the right place on the ring. 1510 */ 1511 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1512 WARN_ONCE(txq->ampdu && 1513 (wifi_seq & 0xff) != txq->write_ptr, 1514 "Q: %d WiFi Seq %d tfdNum %d", 1515 txq_id, wifi_seq, txq->write_ptr); 1516 1517 /* Set up driver data for this TFD */ 1518 txq->entries[txq->write_ptr].skb = skb; 1519 txq->entries[txq->write_ptr].cmd = dev_cmd; 1520 1521 dev_cmd->hdr.sequence = 1522 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1523 INDEX_TO_SEQ(txq->write_ptr))); 1524 1525 tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); 1526 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + 1527 offsetof(struct iwl_tx_cmd, scratch); 1528 1529 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1530 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 1531 1532 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1533 out_meta = &txq->entries[txq->write_ptr].meta; 1534 out_meta->flags = 0; 1535 1536 /* 1537 * The second TB (tb1) points to the remainder of the TX command 1538 * and the 802.11 header - dword aligned size 1539 * (This calculation modifies the TX command, so do it before the 1540 * setup of the first TB) 1541 */ 1542 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + 1543 hdr_len - IWL_FIRST_TB_SIZE; 1544 /* do not align A-MSDU to dword as the subframe header aligns it */ 1545 amsdu = ieee80211_is_data_qos(fc) && 1546 (*ieee80211_get_qos_ctl(hdr) & 1547 IEEE80211_QOS_CTL_A_MSDU_PRESENT); 1548 if (!amsdu) { 1549 tb1_len = ALIGN(len, 4); 1550 /* Tell NIC about any 2-byte padding after MAC header */ 1551 if (tb1_len != len) 1552 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); 1553 } else { 1554 tb1_len = len; 1555 } 1556 1557 /* 1558 * The first TB points to bi-directional DMA data, we'll 1559 * memcpy the data into it later. 1560 */ 1561 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 1562 IWL_FIRST_TB_SIZE, true); 1563 1564 /* there must be data left over for TB1 or this code must be changed */ 1565 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); 1566 1567 /* map the data for TB1 */ 1568 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 1569 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 1570 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 1571 goto out_err; 1572 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 1573 1574 trace_iwlwifi_dev_tx(trans->dev, skb, 1575 iwl_txq_get_tfd(trans, txq, txq->write_ptr), 1576 trans->txqs.tfd.size, 1577 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 1578 hdr_len); 1579 1580 /* 1581 * If gso_size wasn't set, don't give the frame "amsdu treatment" 1582 * (adding subframes, etc.). 1583 * This can happen in some testing flows when the amsdu was already 1584 * pre-built, and we just need to send the resulting skb. 
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}
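/*
 * Illustrative note (not part of the driver): the "Theory of operation"
 * comment at the top of this file describes the circular-buffer bookkeeping
 * that the queue helpers used here (iwl_txq_inc_wrap(), iwl_txq_space(),
 * etc.) rely on.  The stand-alone sketch below shows one way such
 * bookkeeping can work for a power-of-two ring with "read"/"write" pointers
 * and a couple of entries deliberately left unused so that an empty ring and
 * a full ring are distinguishable.  All names here (demo_ring, demo_*) are
 * hypothetical and exist only for this example; they are not the driver's
 * data structures, nor its exact arithmetic.
 */
#include <stdbool.h>

#define DEMO_RING_SIZE	256	/* must be a power of two */
#define DEMO_RESERVED	2	/* entries kept empty, never filled */

struct demo_ring {
	unsigned int read_ptr;	/* next entry the "device" will consume */
	unsigned int write_ptr;	/* next entry the "driver" will fill */
};

/* Advance an index by one slot, wrapping at the end of the ring. */
static unsigned int demo_inc_wrap(unsigned int idx)
{
	return (idx + 1) & (DEMO_RING_SIZE - 1);
}

/* Entries that may still be filled before hitting the reserved gap. */
static unsigned int demo_free_space(const struct demo_ring *r)
{
	unsigned int used = (r->write_ptr - r->read_ptr) &
			    (DEMO_RING_SIZE - 1);

	return DEMO_RING_SIZE - DEMO_RESERVED - used;
}

/* Queue one entry; returns false when the caller must stop the queue. */
static bool demo_enqueue(struct demo_ring *r)
{
	if (!demo_free_space(r))
		return false;

	r->write_ptr = demo_inc_wrap(r->write_ptr);
	return true;
}

/* Reclaim one completed entry, as a "tx done" handler would. */
static void demo_reclaim(struct demo_ring *r)
{
	if (r->read_ptr != r->write_ptr)
		r->read_ptr = demo_inc_wrap(r->read_ptr);
}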