/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
 * space rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
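	 *
	 * Illustrative sketch only (the numbers are examples, not taken from
	 * the headers): with a ring of TFD_QUEUE_SIZE_MAX = 256 entries and
	 * n_window = 256, one slot stays reserved, so max = 255; a smaller
	 * window, e.g. a 32-slot command queue, can never fill the ring, so
	 * max = n_window = 32. The number of used entries is then computed
	 * modulo the ring size:
	 *
	 *	used = (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
	 *	free = max - used;
	 *
	 * e.g. write_ptr = 10 and read_ptr = 250 give used = 16, free = 239.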
84 */ 85 if (q->n_window < TFD_QUEUE_SIZE_MAX) 86 max = q->n_window; 87 else 88 max = TFD_QUEUE_SIZE_MAX - 1; 89 90 /* 91 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to 92 * modulo by TFD_QUEUE_SIZE_MAX and is well defined. 93 */ 94 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); 95 96 if (WARN_ON(used > max)) 97 return 0; 98 99 return max - used; 100 } 101 102 /* 103 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 104 */ 105 static int iwl_queue_init(struct iwl_txq *q, int slots_num) 106 { 107 q->n_window = slots_num; 108 109 /* slots_num must be power-of-two size, otherwise 110 * get_cmd_index is broken. */ 111 if (WARN_ON(!is_power_of_2(slots_num))) 112 return -EINVAL; 113 114 q->low_mark = q->n_window / 4; 115 if (q->low_mark < 4) 116 q->low_mark = 4; 117 118 q->high_mark = q->n_window / 8; 119 if (q->high_mark < 2) 120 q->high_mark = 2; 121 122 q->write_ptr = 0; 123 q->read_ptr = 0; 124 125 return 0; 126 } 127 128 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, 129 struct iwl_dma_ptr *ptr, size_t size) 130 { 131 if (WARN_ON(ptr->addr)) 132 return -EINVAL; 133 134 ptr->addr = dma_alloc_coherent(trans->dev, size, 135 &ptr->dma, GFP_KERNEL); 136 if (!ptr->addr) 137 return -ENOMEM; 138 ptr->size = size; 139 return 0; 140 } 141 142 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) 143 { 144 if (unlikely(!ptr->addr)) 145 return; 146 147 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); 148 memset(ptr, 0, sizeof(*ptr)); 149 } 150 151 static void iwl_pcie_txq_stuck_timer(unsigned long data) 152 { 153 struct iwl_txq *txq = (void *)data; 154 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 155 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 156 157 spin_lock(&txq->lock); 158 /* check if triggered erroneously */ 159 if (txq->read_ptr == txq->write_ptr) { 160 spin_unlock(&txq->lock); 161 return; 162 } 163 spin_unlock(&txq->lock); 164 165 iwl_trans_pcie_log_scd_error(trans, txq); 166 167 iwl_force_nmi(trans); 168 } 169 170 /* 171 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 172 */ 173 static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 174 struct iwl_txq *txq, u16 byte_cnt, 175 int num_tbs) 176 { 177 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 178 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 179 int write_ptr = txq->write_ptr; 180 int txq_id = txq->id; 181 u8 sec_ctl = 0; 182 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 183 __le16 bc_ent; 184 struct iwl_tx_cmd *tx_cmd = 185 (void *)txq->entries[txq->write_ptr].cmd->payload; 186 u8 sta_id = tx_cmd->sta_id; 187 188 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 189 190 sec_ctl = tx_cmd->sec_ctl; 191 192 switch (sec_ctl & TX_CMD_SEC_MSK) { 193 case TX_CMD_SEC_CCM: 194 len += IEEE80211_CCMP_MIC_LEN; 195 break; 196 case TX_CMD_SEC_TKIP: 197 len += IEEE80211_TKIP_ICV_LEN; 198 break; 199 case TX_CMD_SEC_WEP: 200 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; 201 break; 202 } 203 if (trans_pcie->bc_table_dword) 204 len = DIV_ROUND_UP(len, 4); 205 206 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) 207 return; 208 209 bc_ent = cpu_to_le16(len | (sta_id << 12)); 210 211 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 212 213 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 214 scd_bc_tbl[txq_id]. 
215 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 216 } 217 218 static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, 219 struct iwl_txq *txq) 220 { 221 struct iwl_trans_pcie *trans_pcie = 222 IWL_TRANS_GET_PCIE_TRANS(trans); 223 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 224 int txq_id = txq->id; 225 int read_ptr = txq->read_ptr; 226 u8 sta_id = 0; 227 __le16 bc_ent; 228 struct iwl_tx_cmd *tx_cmd = 229 (void *)txq->entries[read_ptr].cmd->payload; 230 231 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 232 233 if (txq_id != trans_pcie->cmd_queue) 234 sta_id = tx_cmd->sta_id; 235 236 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 237 238 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 239 240 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 241 scd_bc_tbl[txq_id]. 242 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 243 } 244 245 /* 246 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 247 */ 248 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, 249 struct iwl_txq *txq) 250 { 251 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 252 u32 reg = 0; 253 int txq_id = txq->id; 254 255 lockdep_assert_held(&txq->lock); 256 257 /* 258 * explicitly wake up the NIC if: 259 * 1. shadow registers aren't enabled 260 * 2. NIC is woken up for CMD regardless of shadow outside this function 261 * 3. there is a chance that the NIC is asleep 262 */ 263 if (!trans->cfg->base_params->shadow_reg_enable && 264 txq_id != trans_pcie->cmd_queue && 265 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 266 /* 267 * wake up nic if it's powered down ... 268 * uCode will wake up, and interrupt us again, so next 269 * time we'll skip this part. 270 */ 271 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 272 273 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 274 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", 275 txq_id, reg); 276 iwl_set_bit(trans, CSR_GP_CNTRL, 277 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 278 txq->need_update = true; 279 return; 280 } 281 } 282 283 /* 284 * if not in power-save mode, uCode will never sleep when we're 285 * trying to tx (during RFKILL, we're not trying to tx). 
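	 *
	 * For illustration (a sketch, not additional driver logic): the
	 * doorbell below packs the queue index and the new write pointer
	 * into a single 32-bit register write,
	 *
	 *	HBUS_TARG_WRPTR = write_ptr | (txq_id << 8)
	 *
	 * so e.g. queue 4 with write_ptr 0x2a is rung as 0x042a.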
286 */ 287 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); 288 if (!txq->block) 289 iwl_write32(trans, HBUS_TARG_WRPTR, 290 txq->write_ptr | (txq_id << 8)); 291 } 292 293 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 294 { 295 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 296 int i; 297 298 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 299 struct iwl_txq *txq = trans_pcie->txq[i]; 300 301 if (!test_bit(i, trans_pcie->queue_used)) 302 continue; 303 304 spin_lock_bh(&txq->lock); 305 if (txq->need_update) { 306 iwl_pcie_txq_inc_wr_ptr(trans, txq); 307 txq->need_update = false; 308 } 309 spin_unlock_bh(&txq->lock); 310 } 311 } 312 313 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, 314 void *_tfd, u8 idx) 315 { 316 317 if (trans->cfg->use_tfh) { 318 struct iwl_tfh_tfd *tfd = _tfd; 319 struct iwl_tfh_tb *tb = &tfd->tbs[idx]; 320 321 return (dma_addr_t)(le64_to_cpu(tb->addr)); 322 } else { 323 struct iwl_tfd *tfd = _tfd; 324 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 325 dma_addr_t addr = get_unaligned_le32(&tb->lo); 326 dma_addr_t hi_len; 327 328 if (sizeof(dma_addr_t) <= sizeof(u32)) 329 return addr; 330 331 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 332 333 /* 334 * shift by 16 twice to avoid warnings on 32-bit 335 * (where this code never runs anyway due to the 336 * if statement above) 337 */ 338 return addr | ((hi_len << 16) << 16); 339 } 340 } 341 342 static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, 343 u8 idx, dma_addr_t addr, u16 len) 344 { 345 struct iwl_tfd *tfd_fh = (void *)tfd; 346 struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; 347 348 u16 hi_n_len = len << 4; 349 350 put_unaligned_le32(addr, &tb->lo); 351 hi_n_len |= iwl_get_dma_hi_addr(addr); 352 353 tb->hi_n_len = cpu_to_le16(hi_n_len); 354 355 tfd_fh->num_tbs = idx + 1; 356 } 357 358 static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) 359 { 360 if (trans->cfg->use_tfh) { 361 struct iwl_tfh_tfd *tfd = _tfd; 362 363 return le16_to_cpu(tfd->num_tbs) & 0x1f; 364 } else { 365 struct iwl_tfd *tfd = _tfd; 366 367 return tfd->num_tbs & 0x1f; 368 } 369 } 370 371 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 372 struct iwl_cmd_meta *meta, 373 struct iwl_txq *txq, int index) 374 { 375 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 376 int i, num_tbs; 377 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 378 379 /* Sanity check on number of chunks */ 380 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 381 382 if (num_tbs >= trans_pcie->max_tbs) { 383 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 384 /* @todo issue fatal error, it is quite serious situation */ 385 return; 386 } 387 388 /* first TB is never freed - it's the bidirectional DMA data */ 389 390 for (i = 1; i < num_tbs; i++) { 391 if (meta->tbs & BIT(i)) 392 dma_unmap_page(trans->dev, 393 iwl_pcie_tfd_tb_get_addr(trans, tfd, i), 394 iwl_pcie_tfd_tb_get_len(trans, tfd, i), 395 DMA_TO_DEVICE); 396 else 397 dma_unmap_single(trans->dev, 398 iwl_pcie_tfd_tb_get_addr(trans, tfd, 399 i), 400 iwl_pcie_tfd_tb_get_len(trans, tfd, 401 i), 402 DMA_TO_DEVICE); 403 } 404 405 if (trans->cfg->use_tfh) { 406 struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 407 408 tfd_fh->num_tbs = 0; 409 } else { 410 struct iwl_tfd *tfd_fh = (void *)tfd; 411 412 tfd_fh->num_tbs = 0; 413 } 414 415 } 416 417 /* 418 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 419 * @trans - transport private data 420 * @txq - tx 
queue 421 * @dma_dir - the direction of the DMA mapping 422 * 423 * Does NOT advance any TFD circular buffer read/write indexes 424 * Does NOT free the TFD itself (which is within circular buffer) 425 */ 426 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 427 { 428 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 429 * idx is bounded by n_window 430 */ 431 int rd_ptr = txq->read_ptr; 432 int idx = get_cmd_index(txq, rd_ptr); 433 434 lockdep_assert_held(&txq->lock); 435 436 /* We have only q->n_window txq->entries, but we use 437 * TFD_QUEUE_SIZE_MAX tfds 438 */ 439 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); 440 441 /* free SKB */ 442 if (txq->entries) { 443 struct sk_buff *skb; 444 445 skb = txq->entries[idx].skb; 446 447 /* Can be called from irqs-disabled context 448 * If skb is not NULL, it means that the whole queue is being 449 * freed and that the queue is not empty - free the skb 450 */ 451 if (skb) { 452 iwl_op_mode_free_skb(trans->op_mode, skb); 453 txq->entries[idx].skb = NULL; 454 } 455 } 456 } 457 458 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 459 dma_addr_t addr, u16 len, bool reset) 460 { 461 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 462 void *tfd; 463 u32 num_tbs; 464 465 tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; 466 467 if (reset) 468 memset(tfd, 0, trans_pcie->tfd_size); 469 470 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 471 472 /* Each TFD can point to a maximum max_tbs Tx buffers */ 473 if (num_tbs >= trans_pcie->max_tbs) { 474 IWL_ERR(trans, "Error can not send more than %d chunks\n", 475 trans_pcie->max_tbs); 476 return -EINVAL; 477 } 478 479 if (WARN(addr & ~IWL_TX_DMA_MASK, 480 "Unaligned address = %llx\n", (unsigned long long)addr)) 481 return -EINVAL; 482 483 iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); 484 485 return num_tbs; 486 } 487 488 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, 489 int slots_num, bool cmd_queue) 490 { 491 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 492 size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; 493 size_t tb0_buf_sz; 494 int i; 495 496 if (WARN_ON(txq->entries || txq->tfds)) 497 return -EINVAL; 498 499 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 500 (unsigned long)txq); 501 txq->trans_pcie = trans_pcie; 502 503 txq->n_window = slots_num; 504 505 txq->entries = kcalloc(slots_num, 506 sizeof(struct iwl_pcie_txq_entry), 507 GFP_KERNEL); 508 509 if (!txq->entries) 510 goto error; 511 512 if (cmd_queue) 513 for (i = 0; i < slots_num; i++) { 514 txq->entries[i].cmd = 515 kmalloc(sizeof(struct iwl_device_cmd), 516 GFP_KERNEL); 517 if (!txq->entries[i].cmd) 518 goto error; 519 } 520 521 /* Circular buffer of transmit frame descriptors (TFDs), 522 * shared with device */ 523 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 524 &txq->dma_addr, GFP_KERNEL); 525 if (!txq->tfds) 526 goto error; 527 528 BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); 529 530 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 531 532 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 533 &txq->first_tb_dma, 534 GFP_KERNEL); 535 if (!txq->first_tb_bufs) 536 goto err_free_tfds; 537 538 return 0; 539 err_free_tfds: 540 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 541 error: 542 if (txq->entries && cmd_queue) 543 for (i = 0; i < slots_num; i++) 544 kfree(txq->entries[i].cmd); 545 kfree(txq->entries); 546 
txq->entries = NULL; 547 548 return -ENOMEM; 549 550 } 551 552 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 553 int slots_num, bool cmd_queue) 554 { 555 int ret; 556 557 txq->need_update = false; 558 559 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 560 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 561 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 562 563 /* Initialize queue's high/low-water marks, and head/tail indexes */ 564 ret = iwl_queue_init(txq, slots_num); 565 if (ret) 566 return ret; 567 568 spin_lock_init(&txq->lock); 569 570 if (cmd_queue) { 571 static struct lock_class_key iwl_pcie_cmd_queue_lock_class; 572 573 lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); 574 } 575 576 __skb_queue_head_init(&txq->overflow_q); 577 578 return 0; 579 } 580 581 static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, 582 struct sk_buff *skb) 583 { 584 struct page **page_ptr; 585 586 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); 587 588 if (*page_ptr) { 589 __free_page(*page_ptr); 590 *page_ptr = NULL; 591 } 592 } 593 594 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 595 { 596 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 597 598 lockdep_assert_held(&trans_pcie->reg_lock); 599 600 if (trans_pcie->ref_cmd_in_flight) { 601 trans_pcie->ref_cmd_in_flight = false; 602 IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); 603 iwl_trans_unref(trans); 604 } 605 606 if (!trans->cfg->base_params->apmg_wake_up_wa) 607 return; 608 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 609 return; 610 611 trans_pcie->cmd_hold_nic_awake = false; 612 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 613 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 614 } 615 616 /* 617 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 618 */ 619 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 620 { 621 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 622 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 623 624 spin_lock_bh(&txq->lock); 625 while (txq->write_ptr != txq->read_ptr) { 626 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 627 txq_id, txq->read_ptr); 628 629 if (txq_id != trans_pcie->cmd_queue) { 630 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 631 632 if (WARN_ON_ONCE(!skb)) 633 continue; 634 635 iwl_pcie_free_tso_page(trans_pcie, skb); 636 } 637 iwl_pcie_txq_free_tfd(trans, txq); 638 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); 639 640 if (txq->read_ptr == txq->write_ptr) { 641 unsigned long flags; 642 643 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 644 if (txq_id != trans_pcie->cmd_queue) { 645 IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", 646 txq->id); 647 iwl_trans_unref(trans); 648 } else { 649 iwl_pcie_clear_cmd_in_flight(trans); 650 } 651 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 652 } 653 } 654 655 while (!skb_queue_empty(&txq->overflow_q)) { 656 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 657 658 iwl_op_mode_free_skb(trans->op_mode, skb); 659 } 660 661 spin_unlock_bh(&txq->lock); 662 663 /* just in case - this queue may have been stopped */ 664 iwl_wake_queue(trans, txq); 665 } 666 667 /* 668 * iwl_pcie_txq_free - Deallocate DMA queue. 669 * @txq: Transmit queue to deallocate. 670 * 671 * Empty queue by removing and destroying all BD's. 672 * Free all buffers. 673 * 0-fill, but do not free "txq" descriptor structure. 
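 *
 * Rough teardown order of the function below (a summary, not extra steps):
 *	iwl_pcie_txq_unmap()	- unmap DMA and free any skbs still queued
 *	kzfree() per slot	- command/duplicated buffers (cmd queue only)
 *	dma_free_coherent()	- TFD ring and the first-TB bounce buffers
 *	kfree(txq->entries)	- host-side bookkeeping array
 *	del_timer_sync()	- stop the stuck-queue watchdog
 *	memset(txq, 0, ...)	- leave the descriptor zeroed, but not freed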
674 */ 675 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 676 { 677 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 678 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 679 struct device *dev = trans->dev; 680 int i; 681 682 if (WARN_ON(!txq)) 683 return; 684 685 iwl_pcie_txq_unmap(trans, txq_id); 686 687 /* De-alloc array of command/tx buffers */ 688 if (txq_id == trans_pcie->cmd_queue) 689 for (i = 0; i < txq->n_window; i++) { 690 kzfree(txq->entries[i].cmd); 691 kzfree(txq->entries[i].free_buf); 692 } 693 694 /* De-alloc circular buffer of TFDs */ 695 if (txq->tfds) { 696 dma_free_coherent(dev, 697 trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, 698 txq->tfds, txq->dma_addr); 699 txq->dma_addr = 0; 700 txq->tfds = NULL; 701 702 dma_free_coherent(dev, 703 sizeof(*txq->first_tb_bufs) * txq->n_window, 704 txq->first_tb_bufs, txq->first_tb_dma); 705 } 706 707 kfree(txq->entries); 708 txq->entries = NULL; 709 710 del_timer_sync(&txq->stuck_timer); 711 712 /* 0-fill queue descriptor structure */ 713 memset(txq, 0, sizeof(*txq)); 714 } 715 716 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 717 { 718 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 719 int nq = trans->cfg->base_params->num_of_queues; 720 int chan; 721 u32 reg_val; 722 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 723 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 724 725 /* make sure all queue are not stopped/used */ 726 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 727 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 728 729 trans_pcie->scd_base_addr = 730 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 731 732 WARN_ON(scd_base_addr != 0 && 733 scd_base_addr != trans_pcie->scd_base_addr); 734 735 /* reset context data, TX status and translation data */ 736 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 737 SCD_CONTEXT_MEM_LOWER_BOUND, 738 NULL, clear_dwords); 739 740 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 741 trans_pcie->scd_bc_tbls.dma >> 10); 742 743 /* The chain extension of the SCD doesn't work well. This feature is 744 * enabled by default by the HW, so we need to disable it manually. 
745 */ 746 if (trans->cfg->base_params->scd_chain_ext_wa) 747 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 748 749 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 750 trans_pcie->cmd_fifo, 751 trans_pcie->cmd_q_wdg_timeout); 752 753 /* Activate all Tx DMA/FIFO channels */ 754 iwl_scd_activate_fifos(trans); 755 756 /* Enable DMA channel */ 757 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 758 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 759 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 760 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 761 762 /* Update FH chicken bits */ 763 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 764 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 765 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 766 767 /* Enable L1-Active */ 768 if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) 769 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 770 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 771 } 772 773 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 774 { 775 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 776 int txq_id; 777 778 /* 779 * we should never get here in gen2 trans mode return early to avoid 780 * having invalid accesses 781 */ 782 if (WARN_ON_ONCE(trans->cfg->gen2)) 783 return; 784 785 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 786 txq_id++) { 787 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 788 if (trans->cfg->use_tfh) 789 iwl_write_direct64(trans, 790 FH_MEM_CBBC_QUEUE(trans, txq_id), 791 txq->dma_addr); 792 else 793 iwl_write_direct32(trans, 794 FH_MEM_CBBC_QUEUE(trans, txq_id), 795 txq->dma_addr >> 8); 796 iwl_pcie_txq_unmap(trans, txq_id); 797 txq->read_ptr = 0; 798 txq->write_ptr = 0; 799 } 800 801 /* Tell NIC where to find the "keep warm" buffer */ 802 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 803 trans_pcie->kw.dma >> 4); 804 805 /* 806 * Send 0 as the scd_base_addr since the device may have be reset 807 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 808 * contain garbage. 809 */ 810 iwl_pcie_tx_start(trans, 0); 811 } 812 813 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 814 { 815 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 816 unsigned long flags; 817 int ch, ret; 818 u32 mask = 0; 819 820 spin_lock(&trans_pcie->irq_lock); 821 822 if (!iwl_trans_grab_nic_access(trans, &flags)) 823 goto out; 824 825 /* Stop each Tx DMA channel */ 826 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 827 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 828 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 829 } 830 831 /* Wait for DMA channels to be idle */ 832 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 833 if (ret < 0) 834 IWL_ERR(trans, 835 "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 836 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 837 838 iwl_trans_release_nic_access(trans, &flags); 839 840 out: 841 spin_unlock(&trans_pcie->irq_lock); 842 } 843 844 /* 845 * iwl_pcie_tx_stop - Stop all Tx DMA channels 846 */ 847 int iwl_pcie_tx_stop(struct iwl_trans *trans) 848 { 849 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 850 int txq_id; 851 852 /* Turn off all Tx DMA fifos */ 853 iwl_scd_deactivate_fifos(trans); 854 855 /* Turn off all Tx DMA channels */ 856 iwl_pcie_tx_stop_fh(trans); 857 858 /* 859 * This function can be called before the op_mode disabled the 860 * queues. This happens when we have an rfkill interrupt. 
861 * Since we stop Tx altogether - mark the queues as stopped. 862 */ 863 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 864 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 865 866 /* This can happen: start_hw, stop_device */ 867 if (!trans_pcie->txq_memory) 868 return 0; 869 870 /* Unmap DMA from host system and free skb's */ 871 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 872 txq_id++) 873 iwl_pcie_txq_unmap(trans, txq_id); 874 875 return 0; 876 } 877 878 /* 879 * iwl_trans_tx_free - Free TXQ Context 880 * 881 * Destroy all TX DMA queues and structures 882 */ 883 void iwl_pcie_tx_free(struct iwl_trans *trans) 884 { 885 int txq_id; 886 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 887 888 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 889 890 /* Tx queues */ 891 if (trans_pcie->txq_memory) { 892 for (txq_id = 0; 893 txq_id < trans->cfg->base_params->num_of_queues; 894 txq_id++) { 895 iwl_pcie_txq_free(trans, txq_id); 896 trans_pcie->txq[txq_id] = NULL; 897 } 898 } 899 900 kfree(trans_pcie->txq_memory); 901 trans_pcie->txq_memory = NULL; 902 903 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 904 905 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); 906 } 907 908 /* 909 * iwl_pcie_tx_alloc - allocate TX context 910 * Allocate all Tx DMA structures and initialize them 911 */ 912 static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 913 { 914 int ret; 915 int txq_id, slots_num; 916 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 917 918 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * 919 sizeof(struct iwlagn_scd_bc_tbl); 920 921 /*It is not allowed to alloc twice, so warn when this happens. 922 * We cannot rely on the previous allocation, so free and fail */ 923 if (WARN_ON(trans_pcie->txq_memory)) { 924 ret = -EINVAL; 925 goto error; 926 } 927 928 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, 929 scd_bc_tbls_size); 930 if (ret) { 931 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 932 goto error; 933 } 934 935 /* Alloc keep-warm buffer */ 936 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 937 if (ret) { 938 IWL_ERR(trans, "Keep Warm allocation failed\n"); 939 goto error; 940 } 941 942 trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, 943 sizeof(struct iwl_txq), GFP_KERNEL); 944 if (!trans_pcie->txq_memory) { 945 IWL_ERR(trans, "Not enough memory for txq\n"); 946 ret = -ENOMEM; 947 goto error; 948 } 949 950 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 951 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 952 txq_id++) { 953 bool cmd_queue = (txq_id == trans_pcie->cmd_queue); 954 955 slots_num = cmd_queue ? 
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 956 trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; 957 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], 958 slots_num, cmd_queue); 959 if (ret) { 960 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 961 goto error; 962 } 963 trans_pcie->txq[txq_id]->id = txq_id; 964 } 965 966 return 0; 967 968 error: 969 iwl_pcie_tx_free(trans); 970 971 return ret; 972 } 973 974 int iwl_pcie_tx_init(struct iwl_trans *trans) 975 { 976 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 977 int ret; 978 int txq_id, slots_num; 979 bool alloc = false; 980 981 if (!trans_pcie->txq_memory) { 982 ret = iwl_pcie_tx_alloc(trans); 983 if (ret) 984 goto error; 985 alloc = true; 986 } 987 988 spin_lock(&trans_pcie->irq_lock); 989 990 /* Turn off all Tx DMA fifos */ 991 iwl_scd_deactivate_fifos(trans); 992 993 /* Tell NIC where to find the "keep warm" buffer */ 994 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 995 trans_pcie->kw.dma >> 4); 996 997 spin_unlock(&trans_pcie->irq_lock); 998 999 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 1000 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 1001 txq_id++) { 1002 bool cmd_queue = (txq_id == trans_pcie->cmd_queue); 1003 1004 slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 1005 ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], 1006 slots_num, cmd_queue); 1007 if (ret) { 1008 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 1009 goto error; 1010 } 1011 1012 /* 1013 * Tell nic where to find circular buffer of TFDs for a 1014 * given Tx queue, and enable the DMA channel used for that 1015 * queue. 1016 * Circular buffer (TFD queue in DRAM) physical base address 1017 */ 1018 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), 1019 trans_pcie->txq[txq_id]->dma_addr >> 8); 1020 } 1021 1022 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); 1023 if (trans->cfg->base_params->num_of_queues > 20) 1024 iwl_set_bits_prph(trans, SCD_GP_CTRL, 1025 SCD_GP_CTRL_ENABLE_31_QUEUES); 1026 1027 return 0; 1028 error: 1029 /*Upon error, free only if we allocated something */ 1030 if (alloc) 1031 iwl_pcie_tx_free(trans); 1032 return ret; 1033 } 1034 1035 static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 1036 { 1037 lockdep_assert_held(&txq->lock); 1038 1039 if (!txq->wd_timeout) 1040 return; 1041 1042 /* 1043 * station is asleep and we send data - that must 1044 * be uAPSD or PS-Poll. Don't rearm the timer. 
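	 *
	 * As an illustration of the rearm policy below (the timeout value is
	 * only an example): with a 2500 ms watchdog, every reclaim that
	 * leaves the queue non-empty pushes the timer out to
	 * jiffies + txq->wd_timeout, and an empty queue simply deletes it,
	 * so the timer can only fire if no progress is made for a full
	 * timeout period.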
1045 */ 1046 if (txq->frozen) 1047 return; 1048 1049 /* 1050 * if empty delete timer, otherwise move timer forward 1051 * since we're making progress on this queue 1052 */ 1053 if (txq->read_ptr == txq->write_ptr) 1054 del_timer(&txq->stuck_timer); 1055 else 1056 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1057 } 1058 1059 /* Frees buffers until index _not_ inclusive */ 1060 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 1061 struct sk_buff_head *skbs) 1062 { 1063 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1064 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1065 int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); 1066 int last_to_free; 1067 1068 /* This function is not meant to release cmd queue*/ 1069 if (WARN_ON(txq_id == trans_pcie->cmd_queue)) 1070 return; 1071 1072 spin_lock_bh(&txq->lock); 1073 1074 if (!test_bit(txq_id, trans_pcie->queue_used)) { 1075 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 1076 txq_id, ssn); 1077 goto out; 1078 } 1079 1080 if (txq->read_ptr == tfd_num) 1081 goto out; 1082 1083 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 1084 txq_id, txq->read_ptr, tfd_num, ssn); 1085 1086 /*Since we free until index _not_ inclusive, the one before index is 1087 * the last we will free. This one must be used */ 1088 last_to_free = iwl_queue_dec_wrap(tfd_num); 1089 1090 if (!iwl_queue_used(txq, last_to_free)) { 1091 IWL_ERR(trans, 1092 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1093 __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, 1094 txq->write_ptr, txq->read_ptr); 1095 goto out; 1096 } 1097 1098 if (WARN_ON(!skb_queue_empty(skbs))) 1099 goto out; 1100 1101 for (; 1102 txq->read_ptr != tfd_num; 1103 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1104 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 1105 1106 if (WARN_ON_ONCE(!skb)) 1107 continue; 1108 1109 iwl_pcie_free_tso_page(trans_pcie, skb); 1110 1111 __skb_queue_tail(skbs, skb); 1112 1113 txq->entries[txq->read_ptr].skb = NULL; 1114 1115 if (!trans->cfg->use_tfh) 1116 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 1117 1118 iwl_pcie_txq_free_tfd(trans, txq); 1119 } 1120 1121 iwl_pcie_txq_progress(txq); 1122 1123 if (iwl_queue_space(txq) > txq->low_mark && 1124 test_bit(txq_id, trans_pcie->queue_stopped)) { 1125 struct sk_buff_head overflow_skbs; 1126 1127 __skb_queue_head_init(&overflow_skbs); 1128 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); 1129 1130 /* 1131 * This is tricky: we are in reclaim path which is non 1132 * re-entrant, so noone will try to take the access the 1133 * txq data from that path. We stopped tx, so we can't 1134 * have tx as well. Bottom line, we can unlock and re-lock 1135 * later. 1136 */ 1137 spin_unlock_bh(&txq->lock); 1138 1139 while (!skb_queue_empty(&overflow_skbs)) { 1140 struct sk_buff *skb = __skb_dequeue(&overflow_skbs); 1141 struct iwl_device_cmd *dev_cmd_ptr; 1142 1143 dev_cmd_ptr = *(void **)((u8 *)skb->cb + 1144 trans_pcie->dev_cmd_offs); 1145 1146 /* 1147 * Note that we can very well be overflowing again. 1148 * In that case, iwl_queue_space will be small again 1149 * and we won't wake mac80211's queue. 
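			 *
			 * (Illustrative note, not extra logic: in that case
			 * the tx path is expected to park the frame on
			 * txq->overflow_q again, and this splice-and-replay
			 * runs once more on a later reclaim, when enough
			 * TFDs have been freed.)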
1150 */ 1151 iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id); 1152 } 1153 spin_lock_bh(&txq->lock); 1154 1155 if (iwl_queue_space(txq) > txq->low_mark) 1156 iwl_wake_queue(trans, txq); 1157 } 1158 1159 if (txq->read_ptr == txq->write_ptr) { 1160 IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id); 1161 iwl_trans_unref(trans); 1162 } 1163 1164 out: 1165 spin_unlock_bh(&txq->lock); 1166 } 1167 1168 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, 1169 const struct iwl_host_cmd *cmd) 1170 { 1171 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1172 int ret; 1173 1174 lockdep_assert_held(&trans_pcie->reg_lock); 1175 1176 if (!(cmd->flags & CMD_SEND_IN_IDLE) && 1177 !trans_pcie->ref_cmd_in_flight) { 1178 trans_pcie->ref_cmd_in_flight = true; 1179 IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); 1180 iwl_trans_ref(trans); 1181 } 1182 1183 /* 1184 * wake up the NIC to make sure that the firmware will see the host 1185 * command - we will let the NIC sleep once all the host commands 1186 * returned. This needs to be done only on NICs that have 1187 * apmg_wake_up_wa set. 1188 */ 1189 if (trans->cfg->base_params->apmg_wake_up_wa && 1190 !trans_pcie->cmd_hold_nic_awake) { 1191 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1192 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1193 1194 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1195 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1196 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1197 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 1198 15000); 1199 if (ret < 0) { 1200 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1201 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1202 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1203 return -EIO; 1204 } 1205 trans_pcie->cmd_hold_nic_awake = true; 1206 } 1207 1208 return 0; 1209 } 1210 1211 /* 1212 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1213 * 1214 * When FW advances 'R' index, all entries between old and new 'R' index 1215 * need to be reclaimed. As result, some free space forms. If there is 1216 * enough free space (> low mark), wake the stack that feeds us. 
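 *
 * Worked example (illustrative values only): if read_ptr is 5 and the
 * firmware reports completion of the command at index 5, the loop below
 * advances read_ptr exactly once, to 6. Host commands complete one at a
 * time, so having to skip more than one entry means responses were lost,
 * and an NMI is forced to recover.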
1217 */ 1218 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1219 { 1220 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1221 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1222 unsigned long flags; 1223 int nfreed = 0; 1224 1225 lockdep_assert_held(&txq->lock); 1226 1227 if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { 1228 IWL_ERR(trans, 1229 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1230 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 1231 txq->write_ptr, txq->read_ptr); 1232 return; 1233 } 1234 1235 for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx; 1236 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1237 1238 if (nfreed++ > 0) { 1239 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1240 idx, txq->write_ptr, txq->read_ptr); 1241 iwl_force_nmi(trans); 1242 } 1243 } 1244 1245 if (txq->read_ptr == txq->write_ptr) { 1246 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1247 iwl_pcie_clear_cmd_in_flight(trans); 1248 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1249 } 1250 1251 iwl_pcie_txq_progress(txq); 1252 } 1253 1254 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1255 u16 txq_id) 1256 { 1257 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1258 u32 tbl_dw_addr; 1259 u32 tbl_dw; 1260 u16 scd_q2ratid; 1261 1262 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1263 1264 tbl_dw_addr = trans_pcie->scd_base_addr + 1265 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1266 1267 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1268 1269 if (txq_id & 0x1) 1270 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1271 else 1272 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1273 1274 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1275 1276 return 0; 1277 } 1278 1279 /* Receiver address (actually, Rx station's index into station table), 1280 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1281 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1282 1283 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1284 const struct iwl_trans_txq_scd_cfg *cfg, 1285 unsigned int wdg_timeout) 1286 { 1287 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1288 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1289 int fifo = -1; 1290 bool scd_bug = false; 1291 1292 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 1293 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1294 1295 txq->wd_timeout = msecs_to_jiffies(wdg_timeout); 1296 1297 if (cfg) { 1298 fifo = cfg->fifo; 1299 1300 /* Disable the scheduler prior configuring the cmd queue */ 1301 if (txq_id == trans_pcie->cmd_queue && 1302 trans_pcie->scd_set_active) 1303 iwl_scd_enable_set_active(trans, 0); 1304 1305 /* Stop this Tx queue before configuring it */ 1306 iwl_scd_txq_set_inactive(trans, txq_id); 1307 1308 /* Set this queue as a chain-building queue unless it is CMD */ 1309 if (txq_id != trans_pcie->cmd_queue) 1310 iwl_scd_txq_set_chain(trans, txq_id); 1311 1312 if (cfg->aggregate) { 1313 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); 1314 1315 /* Map receiver-address / traffic-ID to this queue */ 1316 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); 1317 1318 /* enable aggregations for the queue */ 1319 iwl_scd_txq_enable_agg(trans, txq_id); 1320 txq->ampdu = true; 1321 } else { 1322 /* 1323 * disable aggregations for the queue, this will also 1324 * make the ra_tid mapping 
configuration irrelevant 1325 * since it is now a non-AGG queue. 1326 */ 1327 iwl_scd_txq_disable_agg(trans, txq_id); 1328 1329 ssn = txq->read_ptr; 1330 } 1331 } else { 1332 /* 1333 * If we need to move the SCD write pointer by steps of 1334 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let 1335 * the op_mode know by returning true later. 1336 * Do this only in case cfg is NULL since this trick can 1337 * be done only if we have DQA enabled which is true for mvm 1338 * only. And mvm never sets a cfg pointer. 1339 * This is really ugly, but this is the easiest way out for 1340 * this sad hardware issue. 1341 * This bug has been fixed on devices 9000 and up. 1342 */ 1343 scd_bug = !trans->cfg->mq_rx_supported && 1344 !((ssn - txq->write_ptr) & 0x3f) && 1345 (ssn != txq->write_ptr); 1346 if (scd_bug) 1347 ssn++; 1348 } 1349 1350 /* Place first TFD at index corresponding to start sequence number. 1351 * Assumes that ssn_idx is valid (!= 0xFFF) */ 1352 txq->read_ptr = (ssn & 0xff); 1353 txq->write_ptr = (ssn & 0xff); 1354 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1355 (ssn & 0xff) | (txq_id << 8)); 1356 1357 if (cfg) { 1358 u8 frame_limit = cfg->frame_limit; 1359 1360 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1361 1362 /* Set up Tx window size and frame limit for this queue */ 1363 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1364 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1365 iwl_trans_write_mem32(trans, 1366 trans_pcie->scd_base_addr + 1367 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1368 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | 1369 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); 1370 1371 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1372 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1373 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1374 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1375 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1376 SCD_QUEUE_STTS_REG_MSK); 1377 1378 /* enable the scheduler for this queue (only) */ 1379 if (txq_id == trans_pcie->cmd_queue && 1380 trans_pcie->scd_set_active) 1381 iwl_scd_enable_set_active(trans, BIT(txq_id)); 1382 1383 IWL_DEBUG_TX_QUEUES(trans, 1384 "Activate queue %d on FIFO %d WrPtr: %d\n", 1385 txq_id, fifo, ssn & 0xff); 1386 } else { 1387 IWL_DEBUG_TX_QUEUES(trans, 1388 "Activate queue %d WrPtr: %d\n", 1389 txq_id, ssn & 0xff); 1390 } 1391 1392 return scd_bug; 1393 } 1394 1395 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, 1396 bool shared_mode) 1397 { 1398 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1399 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1400 1401 txq->ampdu = !shared_mode; 1402 } 1403 1404 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1405 bool configure_scd) 1406 { 1407 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1408 u32 stts_addr = trans_pcie->scd_base_addr + 1409 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1410 static const u32 zero_val[4] = {}; 1411 1412 trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; 1413 trans_pcie->txq[txq_id]->frozen = false; 1414 1415 /* 1416 * Upon HW Rfkill - we stop the device, and then stop the queues 1417 * in the op_mode. Just for the sake of the simplicity of the op_mode, 1418 * allow the op_mode to call txq_disable after it already called 1419 * stop_device. 
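	 *
	 * A rough summary of the disable path below (a sketch, no extra
	 * steps): clear the queue_used bit; if configure_scd, mark the
	 * queue inactive in the scheduler and zero its status area in SRAM;
	 * then unmap and free whatever is still on the ring and drop the
	 * ampdu flag.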
1420 */ 1421 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 1422 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 1423 "queue %d not used", txq_id); 1424 return; 1425 } 1426 1427 if (configure_scd) { 1428 iwl_scd_txq_set_inactive(trans, txq_id); 1429 1430 iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, 1431 ARRAY_SIZE(zero_val)); 1432 } 1433 1434 iwl_pcie_txq_unmap(trans, txq_id); 1435 trans_pcie->txq[txq_id]->ampdu = false; 1436 1437 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1438 } 1439 1440 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1441 1442 /* 1443 * iwl_pcie_enqueue_hcmd - enqueue a uCode command 1444 * @priv: device private data point 1445 * @cmd: a pointer to the ucode command structure 1446 * 1447 * The function returns < 0 values to indicate the operation 1448 * failed. On success, it returns the index (>= 0) of command in the 1449 * command queue. 1450 */ 1451 static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1452 struct iwl_host_cmd *cmd) 1453 { 1454 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1455 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1456 struct iwl_device_cmd *out_cmd; 1457 struct iwl_cmd_meta *out_meta; 1458 unsigned long flags; 1459 void *dup_buf = NULL; 1460 dma_addr_t phys_addr; 1461 int idx; 1462 u16 copy_size, cmd_size, tb0_size; 1463 bool had_nocopy = false; 1464 u8 group_id = iwl_cmd_groupid(cmd->id); 1465 int i, ret; 1466 u32 cmd_pos; 1467 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1468 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1469 1470 if (WARN(!trans->wide_cmd_header && 1471 group_id > IWL_ALWAYS_LONG_GROUP, 1472 "unsupported wide command %#x\n", cmd->id)) 1473 return -EINVAL; 1474 1475 if (group_id != 0) { 1476 copy_size = sizeof(struct iwl_cmd_header_wide); 1477 cmd_size = sizeof(struct iwl_cmd_header_wide); 1478 } else { 1479 copy_size = sizeof(struct iwl_cmd_header); 1480 cmd_size = sizeof(struct iwl_cmd_header); 1481 } 1482 1483 /* need one for the header if the first is NOCOPY */ 1484 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1485 1486 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1487 cmddata[i] = cmd->data[i]; 1488 cmdlen[i] = cmd->len[i]; 1489 1490 if (!cmd->len[i]) 1491 continue; 1492 1493 /* need at least IWL_FIRST_TB_SIZE copied */ 1494 if (copy_size < IWL_FIRST_TB_SIZE) { 1495 int copy = IWL_FIRST_TB_SIZE - copy_size; 1496 1497 if (copy > cmdlen[i]) 1498 copy = cmdlen[i]; 1499 cmdlen[i] -= copy; 1500 cmddata[i] += copy; 1501 copy_size += copy; 1502 } 1503 1504 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1505 had_nocopy = true; 1506 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1507 idx = -EINVAL; 1508 goto free_dup_buf; 1509 } 1510 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1511 /* 1512 * This is also a chunk that isn't copied 1513 * to the static buffer so set had_nocopy. 1514 */ 1515 had_nocopy = true; 1516 1517 /* only allowed once */ 1518 if (WARN_ON(dup_buf)) { 1519 idx = -EINVAL; 1520 goto free_dup_buf; 1521 } 1522 1523 dup_buf = kmemdup(cmddata[i], cmdlen[i], 1524 GFP_ATOMIC); 1525 if (!dup_buf) 1526 return -ENOMEM; 1527 } else { 1528 /* NOCOPY must not be followed by normal! 
*/ 1529 if (WARN_ON(had_nocopy)) { 1530 idx = -EINVAL; 1531 goto free_dup_buf; 1532 } 1533 copy_size += cmdlen[i]; 1534 } 1535 cmd_size += cmd->len[i]; 1536 } 1537 1538 /* 1539 * If any of the command structures end up being larger than 1540 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1541 * allocated into separate TFDs, then we will need to 1542 * increase the size of the buffers. 1543 */ 1544 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1545 "Command %s (%#x) is too large (%d bytes)\n", 1546 iwl_get_cmd_string(trans, cmd->id), 1547 cmd->id, copy_size)) { 1548 idx = -EINVAL; 1549 goto free_dup_buf; 1550 } 1551 1552 spin_lock_bh(&txq->lock); 1553 1554 if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1555 spin_unlock_bh(&txq->lock); 1556 1557 IWL_ERR(trans, "No space in command queue\n"); 1558 iwl_op_mode_cmd_queue_full(trans->op_mode); 1559 idx = -ENOSPC; 1560 goto free_dup_buf; 1561 } 1562 1563 idx = get_cmd_index(txq, txq->write_ptr); 1564 out_cmd = txq->entries[idx].cmd; 1565 out_meta = &txq->entries[idx].meta; 1566 1567 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1568 if (cmd->flags & CMD_WANT_SKB) 1569 out_meta->source = cmd; 1570 1571 /* set up the header */ 1572 if (group_id != 0) { 1573 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1574 out_cmd->hdr_wide.group_id = group_id; 1575 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1576 out_cmd->hdr_wide.length = 1577 cpu_to_le16(cmd_size - 1578 sizeof(struct iwl_cmd_header_wide)); 1579 out_cmd->hdr_wide.reserved = 0; 1580 out_cmd->hdr_wide.sequence = 1581 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1582 INDEX_TO_SEQ(txq->write_ptr)); 1583 1584 cmd_pos = sizeof(struct iwl_cmd_header_wide); 1585 copy_size = sizeof(struct iwl_cmd_header_wide); 1586 } else { 1587 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1588 out_cmd->hdr.sequence = 1589 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1590 INDEX_TO_SEQ(txq->write_ptr)); 1591 out_cmd->hdr.group_id = 0; 1592 1593 cmd_pos = sizeof(struct iwl_cmd_header); 1594 copy_size = sizeof(struct iwl_cmd_header); 1595 } 1596 1597 /* and copy the data that needs to be copied */ 1598 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1599 int copy; 1600 1601 if (!cmd->len[i]) 1602 continue; 1603 1604 /* copy everything if not nocopy/dup */ 1605 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1606 IWL_HCMD_DFL_DUP))) { 1607 copy = cmd->len[i]; 1608 1609 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1610 cmd_pos += copy; 1611 copy_size += copy; 1612 continue; 1613 } 1614 1615 /* 1616 * Otherwise we need at least IWL_FIRST_TB_SIZE copied 1617 * in total (for bi-directional DMA), but copy up to what 1618 * we can fit into the payload for debug dump purposes. 
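		 *
		 * (A restatement for clarity, not extra logic: if the header
		 * plus already-copied data is still shorter than
		 * IWL_FIRST_TB_SIZE, the first (IWL_FIRST_TB_SIZE - copy_size)
		 * bytes of this NOCOPY/DUP chunk are duplicated into the
		 * command buffer so that TB0 always covers IWL_FIRST_TB_SIZE
		 * bytes, while the chunk itself is still DMA-mapped
		 * separately further down.)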
1619 */ 1620 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1621 1622 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1623 cmd_pos += copy; 1624 1625 /* However, treat copy_size the proper way, we need it below */ 1626 if (copy_size < IWL_FIRST_TB_SIZE) { 1627 copy = IWL_FIRST_TB_SIZE - copy_size; 1628 1629 if (copy > cmd->len[i]) 1630 copy = cmd->len[i]; 1631 copy_size += copy; 1632 } 1633 } 1634 1635 IWL_DEBUG_HC(trans, 1636 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1637 iwl_get_cmd_string(trans, cmd->id), 1638 group_id, out_cmd->hdr.cmd, 1639 le16_to_cpu(out_cmd->hdr.sequence), 1640 cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); 1641 1642 /* start the TFD with the minimum copy bytes */ 1643 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 1644 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1645 iwl_pcie_txq_build_tfd(trans, txq, 1646 iwl_pcie_get_first_tb_dma(txq, idx), 1647 tb0_size, true); 1648 1649 /* map first command fragment, if any remains */ 1650 if (copy_size > tb0_size) { 1651 phys_addr = dma_map_single(trans->dev, 1652 ((u8 *)&out_cmd->hdr) + tb0_size, 1653 copy_size - tb0_size, 1654 DMA_TO_DEVICE); 1655 if (dma_mapping_error(trans->dev, phys_addr)) { 1656 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1657 txq->write_ptr); 1658 idx = -ENOMEM; 1659 goto out; 1660 } 1661 1662 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1663 copy_size - tb0_size, false); 1664 } 1665 1666 /* map the remaining (adjusted) nocopy/dup fragments */ 1667 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1668 const void *data = cmddata[i]; 1669 1670 if (!cmdlen[i]) 1671 continue; 1672 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1673 IWL_HCMD_DFL_DUP))) 1674 continue; 1675 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1676 data = dup_buf; 1677 phys_addr = dma_map_single(trans->dev, (void *)data, 1678 cmdlen[i], DMA_TO_DEVICE); 1679 if (dma_mapping_error(trans->dev, phys_addr)) { 1680 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1681 txq->write_ptr); 1682 idx = -ENOMEM; 1683 goto out; 1684 } 1685 1686 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1687 } 1688 1689 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); 1690 out_meta->flags = cmd->flags; 1691 if (WARN_ON_ONCE(txq->entries[idx].free_buf)) 1692 kzfree(txq->entries[idx].free_buf); 1693 txq->entries[idx].free_buf = dup_buf; 1694 1695 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); 1696 1697 /* start timer if queue currently empty */ 1698 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 1699 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1700 1701 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1702 ret = iwl_pcie_set_cmd_in_flight(trans, cmd); 1703 if (ret < 0) { 1704 idx = ret; 1705 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1706 goto out; 1707 } 1708 1709 /* Increment and update queue's write index */ 1710 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 1711 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1712 1713 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1714 1715 out: 1716 spin_unlock_bh(&txq->lock); 1717 free_dup_buf: 1718 if (idx < 0) 1719 kfree(dup_buf); 1720 return idx; 1721 } 1722 1723 /* 1724 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1725 * @rxb: Rx buffer to reclaim 1726 */ 1727 void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 1728 struct iwl_rx_cmd_buffer *rxb) 1729 { 1730 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1731 u16 sequence = 
le16_to_cpu(pkt->hdr.sequence); 1732 u8 group_id; 1733 u32 cmd_id; 1734 int txq_id = SEQ_TO_QUEUE(sequence); 1735 int index = SEQ_TO_INDEX(sequence); 1736 int cmd_index; 1737 struct iwl_device_cmd *cmd; 1738 struct iwl_cmd_meta *meta; 1739 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1740 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1741 1742 /* If a Tx command is being handled and it isn't in the actual 1743 * command queue then there a command routing bug has been introduced 1744 * in the queue management code. */ 1745 if (WARN(txq_id != trans_pcie->cmd_queue, 1746 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 1747 txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, 1748 txq->write_ptr)) { 1749 iwl_print_hex_error(trans, pkt, 32); 1750 return; 1751 } 1752 1753 spin_lock_bh(&txq->lock); 1754 1755 cmd_index = get_cmd_index(txq, index); 1756 cmd = txq->entries[cmd_index].cmd; 1757 meta = &txq->entries[cmd_index].meta; 1758 group_id = cmd->hdr.group_id; 1759 cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); 1760 1761 iwl_pcie_tfd_unmap(trans, meta, txq, index); 1762 1763 /* Input error checking is done when commands are added to queue. */ 1764 if (meta->flags & CMD_WANT_SKB) { 1765 struct page *p = rxb_steal_page(rxb); 1766 1767 meta->source->resp_pkt = pkt; 1768 meta->source->_rx_page_addr = (unsigned long)page_address(p); 1769 meta->source->_rx_page_order = trans_pcie->rx_page_order; 1770 } 1771 1772 if (meta->flags & CMD_WANT_ASYNC_CALLBACK) 1773 iwl_op_mode_async_cb(trans->op_mode, cmd); 1774 1775 iwl_pcie_cmdq_reclaim(trans, txq_id, index); 1776 1777 if (!(meta->flags & CMD_ASYNC)) { 1778 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { 1779 IWL_WARN(trans, 1780 "HCMD_ACTIVE already clear for command %s\n", 1781 iwl_get_cmd_string(trans, cmd_id)); 1782 } 1783 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1784 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1785 iwl_get_cmd_string(trans, cmd_id)); 1786 wake_up(&trans_pcie->wait_command_queue); 1787 } 1788 1789 if (meta->flags & CMD_MAKE_TRANS_IDLE) { 1790 IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", 1791 iwl_get_cmd_string(trans, cmd->hdr.cmd)); 1792 set_bit(STATUS_TRANS_IDLE, &trans->status); 1793 wake_up(&trans_pcie->d0i3_waitq); 1794 } 1795 1796 if (meta->flags & CMD_WAKE_UP_TRANS) { 1797 IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", 1798 iwl_get_cmd_string(trans, cmd->hdr.cmd)); 1799 clear_bit(STATUS_TRANS_IDLE, &trans->status); 1800 wake_up(&trans_pcie->d0i3_waitq); 1801 } 1802 1803 meta->flags = 0; 1804 1805 spin_unlock_bh(&txq->lock); 1806 } 1807 1808 #define HOST_COMPLETE_TIMEOUT (2 * HZ) 1809 1810 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, 1811 struct iwl_host_cmd *cmd) 1812 { 1813 int ret; 1814 1815 /* An asynchronous command can not expect an SKB to be set. 
*/ 1816 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1817 return -EINVAL; 1818 1819 ret = iwl_pcie_enqueue_hcmd(trans, cmd); 1820 if (ret < 0) { 1821 IWL_ERR(trans, 1822 "Error sending %s: enqueue_hcmd failed: %d\n", 1823 iwl_get_cmd_string(trans, cmd->id), ret); 1824 return ret; 1825 } 1826 return 0; 1827 } 1828 1829 static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, 1830 struct iwl_host_cmd *cmd) 1831 { 1832 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1833 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1834 int cmd_idx; 1835 int ret; 1836 1837 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 1838 iwl_get_cmd_string(trans, cmd->id)); 1839 1840 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1841 &trans->status), 1842 "Command %s: a command is already active!\n", 1843 iwl_get_cmd_string(trans, cmd->id))) 1844 return -EIO; 1845 1846 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 1847 iwl_get_cmd_string(trans, cmd->id)); 1848 1849 if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { 1850 ret = wait_event_timeout(trans_pcie->d0i3_waitq, 1851 pm_runtime_active(&trans_pcie->pci_dev->dev), 1852 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); 1853 if (!ret) { 1854 IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); 1855 return -ETIMEDOUT; 1856 } 1857 } 1858 1859 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 1860 if (cmd_idx < 0) { 1861 ret = cmd_idx; 1862 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1863 IWL_ERR(trans, 1864 "Error sending %s: enqueue_hcmd failed: %d\n", 1865 iwl_get_cmd_string(trans, cmd->id), ret); 1866 return ret; 1867 } 1868 1869 ret = wait_event_timeout(trans_pcie->wait_command_queue, 1870 !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1871 &trans->status), 1872 HOST_COMPLETE_TIMEOUT); 1873 if (!ret) { 1874 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 1875 iwl_get_cmd_string(trans, cmd->id), 1876 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1877 1878 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 1879 txq->read_ptr, txq->write_ptr); 1880 1881 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1882 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1883 iwl_get_cmd_string(trans, cmd->id)); 1884 ret = -ETIMEDOUT; 1885 1886 iwl_force_nmi(trans); 1887 iwl_trans_fw_error(trans); 1888 1889 goto cancel; 1890 } 1891 1892 if (test_bit(STATUS_FW_ERROR, &trans->status)) { 1893 IWL_ERR(trans, "FW error in SYNC CMD %s\n", 1894 iwl_get_cmd_string(trans, cmd->id)); 1895 dump_stack(); 1896 ret = -EIO; 1897 goto cancel; 1898 } 1899 1900 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1901 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 1902 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1903 ret = -ERFKILL; 1904 goto cancel; 1905 } 1906 1907 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1908 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 1909 iwl_get_cmd_string(trans, cmd->id)); 1910 ret = -EIO; 1911 goto cancel; 1912 } 1913 1914 return 0; 1915 1916 cancel: 1917 if (cmd->flags & CMD_WANT_SKB) { 1918 /* 1919 * Cancel the CMD_WANT_SKB flag for the cmd in the 1920 * TX cmd queue. Otherwise in case the cmd comes 1921 * in later, it will possibly set an invalid 1922 * address (cmd->meta.source). 
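		 *
		 * For context, a rough sketch of the synchronous flow being
		 * cancelled here: enqueue_hcmd() queued the command,
		 * wait_event_timeout() above gave up waiting for
		 * STATUS_SYNC_HCMD_ACTIVE to clear, and a late
		 * iwl_pcie_hcmd_complete() might still arrive - clearing
		 * CMD_WANT_SKB below makes sure that late completion does
		 * not hand its response page to a caller that has already
		 * gone away.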
1923 */ 1924 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 1925 } 1926 1927 if (cmd->resp_pkt) { 1928 iwl_free_resp(cmd); 1929 cmd->resp_pkt = NULL; 1930 } 1931 1932 return ret; 1933 } 1934 1935 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1936 { 1937 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1938 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 1939 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 1940 cmd->id); 1941 return -ERFKILL; 1942 } 1943 1944 if (cmd->flags & CMD_ASYNC) 1945 return iwl_pcie_send_hcmd_async(trans, cmd); 1946 1947 /* We still can fail on RFKILL that can be asserted while we wait */ 1948 return iwl_pcie_send_hcmd_sync(trans, cmd); 1949 } 1950 1951 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, 1952 struct iwl_txq *txq, u8 hdr_len, 1953 struct iwl_cmd_meta *out_meta, 1954 struct iwl_device_cmd *dev_cmd, u16 tb1_len) 1955 { 1956 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1957 u16 tb2_len; 1958 int i; 1959 1960 /* 1961 * Set up TFD's third entry to point directly to remainder 1962 * of skb's head, if any 1963 */ 1964 tb2_len = skb_headlen(skb) - hdr_len; 1965 1966 if (tb2_len > 0) { 1967 dma_addr_t tb2_phys = dma_map_single(trans->dev, 1968 skb->data + hdr_len, 1969 tb2_len, DMA_TO_DEVICE); 1970 if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { 1971 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1972 txq->write_ptr); 1973 return -EINVAL; 1974 } 1975 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); 1976 } 1977 1978 /* set up the remaining entries to point to the data */ 1979 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1980 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1981 dma_addr_t tb_phys; 1982 int tb_idx; 1983 1984 if (!skb_frag_size(frag)) 1985 continue; 1986 1987 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 1988 skb_frag_size(frag), DMA_TO_DEVICE); 1989 1990 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 1991 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1992 txq->write_ptr); 1993 return -EINVAL; 1994 } 1995 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 1996 skb_frag_size(frag), false); 1997 1998 out_meta->tbs |= BIT(tb_idx); 1999 } 2000 2001 trace_iwlwifi_dev_tx(trans->dev, skb, 2002 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2003 trans_pcie->tfd_size, 2004 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2005 hdr_len); 2006 trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); 2007 return 0; 2008 } 2009 2010 #ifdef CONFIG_INET 2011 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) 2012 { 2013 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2014 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); 2015 2016 if (!p->page) 2017 goto alloc; 2018 2019 /* enough room on this page */ 2020 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) 2021 return p; 2022 2023 /* We don't have enough room on this page, get a new one. 
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);

	if (!p->page)
		goto alloc;

	/* enough room on this page */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
		return p;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	return p;
}

static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}

static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	int ret;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most; the headers fit in one page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use the TSO core;
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This copies the SNAP as well, which will be considered
		 * part of the MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* For testing on current hardware only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
							htons(ETH_P_IPV6),
						    data_left);

			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
			skb_reset_transport_header(csum_skb);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
				 csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			ret = -EINVAL;
			goto out_unmap;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
					       hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			if (trans_pcie->sw_csum_tx)
				skb_put_data(csum_skb, tso.data, size);

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				ret = -EINVAL;
				goto out_unmap;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		/* For testing on early hardware only */
		if (trans_pcie->sw_csum_tx) {
			__wsum csum;

			csum = skb_checksum(csum_skb,
					    skb_checksum_start_offset(csum_skb),
					    csum_skb->len -
					    skb_checksum_start_offset(csum_skb),
					    0);
			dev_kfree_skb(csum_skb);
			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
						hdr_tb_len, DMA_TO_DEVICE);
			tcph->check = csum_fold(csum);
			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
						   hdr_tb_len, DMA_TO_DEVICE);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_unmap:
	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	return ret;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans_pcie->txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_queue_space(txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring if there is no room */
		if (unlikely(iwl_queue_space(txq) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	if (amsdu) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					      out_meta, dev_cmd, tb1_len))) {
		goto out_err;
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
	       IWL_FIRST_TB_SIZE);

	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_pcie_tfd_get_num_tbs(trans, tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout) {
			/*
			 * If the TXQ is active, arm the stuck timer now;
			 * if it is frozen, store the timeout in
			 * frozen_expiry_remainder so that the timer is
			 * armed with the right value when the station
			 * wakes up.
			 */
			if (!txq->frozen)
				mod_timer(&txq->stuck_timer,
					  jiffies + txq->wd_timeout);
			else
				txq->frozen_expiry_remainder = txq->wd_timeout;
		}
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}