1 /****************************************************************************** 2 * 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 6 * 7 * Portions of this file are derived from the ipw3945 project, as well 8 * as portions of the ieee80211 subsystem header files. 9 * 10 * This program is free software; you can redistribute it and/or modify it 11 * under the terms of version 2 of the GNU General Public License as 12 * published by the Free Software Foundation. 13 * 14 * This program is distributed in the hope that it will be useful, but WITHOUT 15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 17 * more details. 18 * 19 * You should have received a copy of the GNU General Public License along with 20 * this program; if not, write to the Free Software Foundation, Inc., 21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 22 * 23 * The full GNU General Public License is included in this distribution in the 24 * file called LICENSE. 25 * 26 * Contact Information: 27 * Intel Linux Wireless <linuxwifi@intel.com> 28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * 30 *****************************************************************************/ 31 #include <linux/etherdevice.h> 32 #include <linux/ieee80211.h> 33 #include <linux/slab.h> 34 #include <linux/sched.h> 35 #include <linux/pm_runtime.h> 36 #include <net/ip6_checksum.h> 37 #include <net/tso.h> 38 39 #include "iwl-debug.h" 40 #include "iwl-csr.h" 41 #include "iwl-prph.h" 42 #include "iwl-io.h" 43 #include "iwl-scd.h" 44 #include "iwl-op-mode.h" 45 #include "internal.h" 46 #include "fw/api/tx.h" 47 48 #define IWL_TX_CRC_SIZE 4 49 #define IWL_TX_DELIMITER_SIZE 4 50 51 /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 52 * DMA services 53 * 54 * Theory of operation 55 * 56 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 57 * of buffer descriptors, each of which points to one or more data buffers for 58 * the device to read from or fill. Driver and device exchange status of each 59 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 60 * entries in each circular buffer, to protect against confusing empty and full 61 * queue states. 62 * 63 * The device reads or writes the data in the queues via the device's several 64 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 65 * 66 * For Tx queue, there are low mark and high mark limits. If, after queuing 67 * the packet for Tx, free space become < low mark, Tx queue stopped. When 68 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 69 * Tx queue resumed. 70 * 71 ***************************************************/ 72 73 int iwl_queue_space(const struct iwl_txq *q) 74 { 75 unsigned int max; 76 unsigned int used; 77 78 /* 79 * To avoid ambiguity between empty and completely full queues, there 80 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue. 81 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need 82 * to reserve any queue entries for this purpose. 83 */ 84 if (q->n_window < TFD_QUEUE_SIZE_MAX) 85 max = q->n_window; 86 else 87 max = TFD_QUEUE_SIZE_MAX - 1; 88 89 /* 90 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to 91 * modulo by TFD_QUEUE_SIZE_MAX and is well defined. 92 */ 93 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); 94 95 if (WARN_ON(used > max)) 96 return 0; 97 98 return max - used; 99 } 100 101 /* 102 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 103 */ 104 static int iwl_queue_init(struct iwl_txq *q, int slots_num) 105 { 106 q->n_window = slots_num; 107 108 /* slots_num must be power-of-two size, otherwise 109 * iwl_pcie_get_cmd_index is broken. */ 110 if (WARN_ON(!is_power_of_2(slots_num))) 111 return -EINVAL; 112 113 q->low_mark = q->n_window / 4; 114 if (q->low_mark < 4) 115 q->low_mark = 4; 116 117 q->high_mark = q->n_window / 8; 118 if (q->high_mark < 2) 119 q->high_mark = 2; 120 121 q->write_ptr = 0; 122 q->read_ptr = 0; 123 124 return 0; 125 } 126 127 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, 128 struct iwl_dma_ptr *ptr, size_t size) 129 { 130 if (WARN_ON(ptr->addr)) 131 return -EINVAL; 132 133 ptr->addr = dma_alloc_coherent(trans->dev, size, 134 &ptr->dma, GFP_KERNEL); 135 if (!ptr->addr) 136 return -ENOMEM; 137 ptr->size = size; 138 return 0; 139 } 140 141 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) 142 { 143 if (unlikely(!ptr->addr)) 144 return; 145 146 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); 147 memset(ptr, 0, sizeof(*ptr)); 148 } 149 150 static void iwl_pcie_txq_stuck_timer(unsigned long data) 151 { 152 struct iwl_txq *txq = (void *)data; 153 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 154 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 155 156 spin_lock(&txq->lock); 157 /* check if triggered erroneously */ 158 if (txq->read_ptr == txq->write_ptr) { 159 spin_unlock(&txq->lock); 160 return; 161 } 162 spin_unlock(&txq->lock); 163 164 iwl_trans_pcie_log_scd_error(trans, txq); 165 166 iwl_force_nmi(trans); 167 } 168 169 /* 170 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 171 */ 172 static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 173 struct iwl_txq *txq, u16 byte_cnt, 174 int num_tbs) 175 { 176 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 177 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 178 int write_ptr = txq->write_ptr; 179 int txq_id = txq->id; 180 u8 sec_ctl = 0; 181 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 182 __le16 bc_ent; 183 struct iwl_tx_cmd *tx_cmd = 184 (void *)txq->entries[txq->write_ptr].cmd->payload; 185 u8 sta_id = tx_cmd->sta_id; 186 187 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 188 189 sec_ctl = tx_cmd->sec_ctl; 190 191 switch (sec_ctl & TX_CMD_SEC_MSK) { 192 case TX_CMD_SEC_CCM: 193 len += IEEE80211_CCMP_MIC_LEN; 194 break; 195 case TX_CMD_SEC_TKIP: 196 len += IEEE80211_TKIP_ICV_LEN; 197 break; 198 case TX_CMD_SEC_WEP: 199 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; 200 break; 201 } 202 if (trans_pcie->bc_table_dword) 203 len = DIV_ROUND_UP(len, 4); 204 205 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) 206 return; 207 208 bc_ent = cpu_to_le16(len | (sta_id << 12)); 209 210 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 211 212 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 213 scd_bc_tbl[txq_id]. 214 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 215 } 216 217 static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, 218 struct iwl_txq *txq) 219 { 220 struct iwl_trans_pcie *trans_pcie = 221 IWL_TRANS_GET_PCIE_TRANS(trans); 222 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 223 int txq_id = txq->id; 224 int read_ptr = txq->read_ptr; 225 u8 sta_id = 0; 226 __le16 bc_ent; 227 struct iwl_tx_cmd *tx_cmd = 228 (void *)txq->entries[read_ptr].cmd->payload; 229 230 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 231 232 if (txq_id != trans_pcie->cmd_queue) 233 sta_id = tx_cmd->sta_id; 234 235 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 236 237 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 238 239 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 240 scd_bc_tbl[txq_id]. 241 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 242 } 243 244 /* 245 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 246 */ 247 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, 248 struct iwl_txq *txq) 249 { 250 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 251 u32 reg = 0; 252 int txq_id = txq->id; 253 254 lockdep_assert_held(&txq->lock); 255 256 /* 257 * explicitly wake up the NIC if: 258 * 1. shadow registers aren't enabled 259 * 2. NIC is woken up for CMD regardless of shadow outside this function 260 * 3. there is a chance that the NIC is asleep 261 */ 262 if (!trans->cfg->base_params->shadow_reg_enable && 263 txq_id != trans_pcie->cmd_queue && 264 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 265 /* 266 * wake up nic if it's powered down ... 267 * uCode will wake up, and interrupt us again, so next 268 * time we'll skip this part. 269 */ 270 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 271 272 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 273 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", 274 txq_id, reg); 275 iwl_set_bit(trans, CSR_GP_CNTRL, 276 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 277 txq->need_update = true; 278 return; 279 } 280 } 281 282 /* 283 * if not in power-save mode, uCode will never sleep when we're 284 * trying to tx (during RFKILL, we're not trying to tx). 285 */ 286 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); 287 if (!txq->block) 288 iwl_write32(trans, HBUS_TARG_WRPTR, 289 txq->write_ptr | (txq_id << 8)); 290 } 291 292 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 293 { 294 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 295 int i; 296 297 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 298 struct iwl_txq *txq = trans_pcie->txq[i]; 299 300 if (!test_bit(i, trans_pcie->queue_used)) 301 continue; 302 303 spin_lock_bh(&txq->lock); 304 if (txq->need_update) { 305 iwl_pcie_txq_inc_wr_ptr(trans, txq); 306 txq->need_update = false; 307 } 308 spin_unlock_bh(&txq->lock); 309 } 310 } 311 312 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, 313 void *_tfd, u8 idx) 314 { 315 316 if (trans->cfg->use_tfh) { 317 struct iwl_tfh_tfd *tfd = _tfd; 318 struct iwl_tfh_tb *tb = &tfd->tbs[idx]; 319 320 return (dma_addr_t)(le64_to_cpu(tb->addr)); 321 } else { 322 struct iwl_tfd *tfd = _tfd; 323 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 324 dma_addr_t addr = get_unaligned_le32(&tb->lo); 325 dma_addr_t hi_len; 326 327 if (sizeof(dma_addr_t) <= sizeof(u32)) 328 return addr; 329 330 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 331 332 /* 333 * shift by 16 twice to avoid warnings on 32-bit 334 * (where this code never runs anyway due to the 335 * if statement above) 336 */ 337 return addr | ((hi_len << 16) << 16); 338 } 339 } 340 341 static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, 342 u8 idx, dma_addr_t addr, u16 len) 343 { 344 struct iwl_tfd *tfd_fh = (void *)tfd; 345 struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; 346 347 u16 hi_n_len = len << 4; 348 349 put_unaligned_le32(addr, &tb->lo); 350 hi_n_len |= iwl_get_dma_hi_addr(addr); 351 352 tb->hi_n_len = cpu_to_le16(hi_n_len); 353 354 tfd_fh->num_tbs = idx + 1; 355 } 356 357 static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) 358 { 359 if (trans->cfg->use_tfh) { 360 struct iwl_tfh_tfd *tfd = _tfd; 361 362 return le16_to_cpu(tfd->num_tbs) & 0x1f; 363 } else { 364 struct iwl_tfd *tfd = _tfd; 365 366 return tfd->num_tbs & 0x1f; 367 } 368 } 369 370 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 371 struct iwl_cmd_meta *meta, 372 struct iwl_txq *txq, int index) 373 { 374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 375 int i, num_tbs; 376 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 377 378 /* Sanity check on number of chunks */ 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 380 381 if (num_tbs >= trans_pcie->max_tbs) { 382 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 383 /* @todo issue fatal error, it is quite serious situation */ 384 return; 385 } 386 387 /* first TB is never freed - it's the bidirectional DMA data */ 388 389 for (i = 1; i < num_tbs; i++) { 390 if (meta->tbs & BIT(i)) 391 dma_unmap_page(trans->dev, 392 iwl_pcie_tfd_tb_get_addr(trans, tfd, i), 393 iwl_pcie_tfd_tb_get_len(trans, tfd, i), 394 DMA_TO_DEVICE); 395 else 396 dma_unmap_single(trans->dev, 397 iwl_pcie_tfd_tb_get_addr(trans, tfd, 398 i), 399 iwl_pcie_tfd_tb_get_len(trans, tfd, 400 i), 401 DMA_TO_DEVICE); 402 } 403 404 if (trans->cfg->use_tfh) { 405 struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 406 407 tfd_fh->num_tbs = 0; 408 } else { 409 struct iwl_tfd *tfd_fh = (void *)tfd; 410 411 tfd_fh->num_tbs = 0; 412 } 413 414 } 415 416 /* 417 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 418 * @trans - transport private data 419 * @txq - tx queue 420 * @dma_dir - the direction of the DMA mapping 421 * 422 * Does NOT advance any TFD circular buffer read/write indexes 423 * Does NOT free the TFD itself (which is within circular buffer) 424 */ 425 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 426 { 427 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 428 * idx is bounded by n_window 429 */ 430 int rd_ptr = txq->read_ptr; 431 int idx = iwl_pcie_get_cmd_index(txq, rd_ptr); 432 433 lockdep_assert_held(&txq->lock); 434 435 /* We have only q->n_window txq->entries, but we use 436 * TFD_QUEUE_SIZE_MAX tfds 437 */ 438 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); 439 440 /* free SKB */ 441 if (txq->entries) { 442 struct sk_buff *skb; 443 444 skb = txq->entries[idx].skb; 445 446 /* Can be called from irqs-disabled context 447 * If skb is not NULL, it means that the whole queue is being 448 * freed and that the queue is not empty - free the skb 449 */ 450 if (skb) { 451 iwl_op_mode_free_skb(trans->op_mode, skb); 452 txq->entries[idx].skb = NULL; 453 } 454 } 455 } 456 457 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 458 dma_addr_t addr, u16 len, bool reset) 459 { 460 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 461 void *tfd; 462 u32 num_tbs; 463 464 tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; 465 466 if (reset) 467 memset(tfd, 0, trans_pcie->tfd_size); 468 469 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 470 471 /* Each TFD can point to a maximum max_tbs Tx buffers */ 472 if (num_tbs >= trans_pcie->max_tbs) { 473 IWL_ERR(trans, "Error can not send more than %d chunks\n", 474 trans_pcie->max_tbs); 475 return -EINVAL; 476 } 477 478 if (WARN(addr & ~IWL_TX_DMA_MASK, 479 "Unaligned address = %llx\n", (unsigned long long)addr)) 480 return -EINVAL; 481 482 iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); 483 484 return num_tbs; 485 } 486 487 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, 488 int slots_num, bool cmd_queue) 489 { 490 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 491 size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; 492 size_t tb0_buf_sz; 493 int i; 494 495 if (WARN_ON(txq->entries || txq->tfds)) 496 return -EINVAL; 497 498 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 499 (unsigned long)txq); 500 txq->trans_pcie = trans_pcie; 501 502 txq->n_window = slots_num; 503 504 txq->entries = kcalloc(slots_num, 505 sizeof(struct iwl_pcie_txq_entry), 506 GFP_KERNEL); 507 508 if (!txq->entries) 509 goto error; 510 511 if (cmd_queue) 512 for (i = 0; i < slots_num; i++) { 513 txq->entries[i].cmd = 514 kmalloc(sizeof(struct iwl_device_cmd), 515 GFP_KERNEL); 516 if (!txq->entries[i].cmd) 517 goto error; 518 } 519 520 /* Circular buffer of transmit frame descriptors (TFDs), 521 * shared with device */ 522 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 523 &txq->dma_addr, GFP_KERNEL); 524 if (!txq->tfds) 525 goto error; 526 527 BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); 528 529 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 530 531 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 532 &txq->first_tb_dma, 533 GFP_KERNEL); 534 if (!txq->first_tb_bufs) 535 goto err_free_tfds; 536 537 return 0; 538 err_free_tfds: 539 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 540 error: 541 if (txq->entries && cmd_queue) 542 for (i = 0; i < slots_num; i++) 543 kfree(txq->entries[i].cmd); 544 kfree(txq->entries); 545 txq->entries = NULL; 546 547 return -ENOMEM; 548 549 } 550 551 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 552 int slots_num, bool cmd_queue) 553 { 554 int ret; 555 556 txq->need_update = false; 557 558 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 559 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 560 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 561 562 /* Initialize queue's high/low-water marks, and head/tail indexes */ 563 ret = iwl_queue_init(txq, slots_num); 564 if (ret) 565 return ret; 566 567 spin_lock_init(&txq->lock); 568 569 if (cmd_queue) { 570 static struct lock_class_key iwl_pcie_cmd_queue_lock_class; 571 572 lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); 573 } 574 575 __skb_queue_head_init(&txq->overflow_q); 576 577 return 0; 578 } 579 580 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, 581 struct sk_buff *skb) 582 { 583 struct page **page_ptr; 584 585 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); 586 587 if (*page_ptr) { 588 __free_page(*page_ptr); 589 *page_ptr = NULL; 590 } 591 } 592 593 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 594 { 595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 596 597 lockdep_assert_held(&trans_pcie->reg_lock); 598 599 if (trans_pcie->ref_cmd_in_flight) { 600 trans_pcie->ref_cmd_in_flight = false; 601 IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); 602 iwl_trans_unref(trans); 603 } 604 605 if (!trans->cfg->base_params->apmg_wake_up_wa) 606 return; 607 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 608 return; 609 610 trans_pcie->cmd_hold_nic_awake = false; 611 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 612 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 613 } 614 615 /* 616 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 617 */ 618 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 619 { 620 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 621 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 622 623 spin_lock_bh(&txq->lock); 624 while (txq->write_ptr != txq->read_ptr) { 625 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 626 txq_id, txq->read_ptr); 627 628 if (txq_id != trans_pcie->cmd_queue) { 629 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 630 631 if (WARN_ON_ONCE(!skb)) 632 continue; 633 634 iwl_pcie_free_tso_page(trans_pcie, skb); 635 } 636 iwl_pcie_txq_free_tfd(trans, txq); 637 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); 638 639 if (txq->read_ptr == txq->write_ptr) { 640 unsigned long flags; 641 642 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 643 if (txq_id != trans_pcie->cmd_queue) { 644 IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", 645 txq->id); 646 iwl_trans_unref(trans); 647 } else { 648 iwl_pcie_clear_cmd_in_flight(trans); 649 } 650 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 651 } 652 } 653 654 while (!skb_queue_empty(&txq->overflow_q)) { 655 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 656 657 iwl_op_mode_free_skb(trans->op_mode, skb); 658 } 659 660 spin_unlock_bh(&txq->lock); 661 662 /* just in case - this queue may have been stopped */ 663 iwl_wake_queue(trans, txq); 664 } 665 666 /* 667 * iwl_pcie_txq_free - Deallocate DMA queue. 668 * @txq: Transmit queue to deallocate. 669 * 670 * Empty queue by removing and destroying all BD's. 671 * Free all buffers. 672 * 0-fill, but do not free "txq" descriptor structure. 673 */ 674 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 675 { 676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 677 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 678 struct device *dev = trans->dev; 679 int i; 680 681 if (WARN_ON(!txq)) 682 return; 683 684 iwl_pcie_txq_unmap(trans, txq_id); 685 686 /* De-alloc array of command/tx buffers */ 687 if (txq_id == trans_pcie->cmd_queue) 688 for (i = 0; i < txq->n_window; i++) { 689 kzfree(txq->entries[i].cmd); 690 kzfree(txq->entries[i].free_buf); 691 } 692 693 /* De-alloc circular buffer of TFDs */ 694 if (txq->tfds) { 695 dma_free_coherent(dev, 696 trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, 697 txq->tfds, txq->dma_addr); 698 txq->dma_addr = 0; 699 txq->tfds = NULL; 700 701 dma_free_coherent(dev, 702 sizeof(*txq->first_tb_bufs) * txq->n_window, 703 txq->first_tb_bufs, txq->first_tb_dma); 704 } 705 706 kfree(txq->entries); 707 txq->entries = NULL; 708 709 del_timer_sync(&txq->stuck_timer); 710 711 /* 0-fill queue descriptor structure */ 712 memset(txq, 0, sizeof(*txq)); 713 } 714 715 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 716 { 717 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 718 int nq = trans->cfg->base_params->num_of_queues; 719 int chan; 720 u32 reg_val; 721 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 722 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 723 724 /* make sure all queue are not stopped/used */ 725 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 726 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 727 728 trans_pcie->scd_base_addr = 729 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 730 731 WARN_ON(scd_base_addr != 0 && 732 scd_base_addr != trans_pcie->scd_base_addr); 733 734 /* reset context data, TX status and translation data */ 735 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 736 SCD_CONTEXT_MEM_LOWER_BOUND, 737 NULL, clear_dwords); 738 739 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 740 trans_pcie->scd_bc_tbls.dma >> 10); 741 742 /* The chain extension of the SCD doesn't work well. This feature is 743 * enabled by default by the HW, so we need to disable it manually. 744 */ 745 if (trans->cfg->base_params->scd_chain_ext_wa) 746 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 747 748 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 749 trans_pcie->cmd_fifo, 750 trans_pcie->cmd_q_wdg_timeout); 751 752 /* Activate all Tx DMA/FIFO channels */ 753 iwl_scd_activate_fifos(trans); 754 755 /* Enable DMA channel */ 756 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 757 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 758 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 759 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 760 761 /* Update FH chicken bits */ 762 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 763 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 764 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 765 766 /* Enable L1-Active */ 767 if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) 768 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 769 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 770 } 771 772 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 773 { 774 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 775 int txq_id; 776 777 /* 778 * we should never get here in gen2 trans mode return early to avoid 779 * having invalid accesses 780 */ 781 if (WARN_ON_ONCE(trans->cfg->gen2)) 782 return; 783 784 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 785 txq_id++) { 786 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 787 if (trans->cfg->use_tfh) 788 iwl_write_direct64(trans, 789 FH_MEM_CBBC_QUEUE(trans, txq_id), 790 txq->dma_addr); 791 else 792 iwl_write_direct32(trans, 793 FH_MEM_CBBC_QUEUE(trans, txq_id), 794 txq->dma_addr >> 8); 795 iwl_pcie_txq_unmap(trans, txq_id); 796 txq->read_ptr = 0; 797 txq->write_ptr = 0; 798 } 799 800 /* Tell NIC where to find the "keep warm" buffer */ 801 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 802 trans_pcie->kw.dma >> 4); 803 804 /* 805 * Send 0 as the scd_base_addr since the device may have be reset 806 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 807 * contain garbage. 808 */ 809 iwl_pcie_tx_start(trans, 0); 810 } 811 812 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 813 { 814 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 815 unsigned long flags; 816 int ch, ret; 817 u32 mask = 0; 818 819 spin_lock(&trans_pcie->irq_lock); 820 821 if (!iwl_trans_grab_nic_access(trans, &flags)) 822 goto out; 823 824 /* Stop each Tx DMA channel */ 825 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 826 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 827 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 828 } 829 830 /* Wait for DMA channels to be idle */ 831 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 832 if (ret < 0) 833 IWL_ERR(trans, 834 "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 835 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 836 837 iwl_trans_release_nic_access(trans, &flags); 838 839 out: 840 spin_unlock(&trans_pcie->irq_lock); 841 } 842 843 /* 844 * iwl_pcie_tx_stop - Stop all Tx DMA channels 845 */ 846 int iwl_pcie_tx_stop(struct iwl_trans *trans) 847 { 848 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 849 int txq_id; 850 851 /* Turn off all Tx DMA fifos */ 852 iwl_scd_deactivate_fifos(trans); 853 854 /* Turn off all Tx DMA channels */ 855 iwl_pcie_tx_stop_fh(trans); 856 857 /* 858 * This function can be called before the op_mode disabled the 859 * queues. This happens when we have an rfkill interrupt. 860 * Since we stop Tx altogether - mark the queues as stopped. 861 */ 862 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 863 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 864 865 /* This can happen: start_hw, stop_device */ 866 if (!trans_pcie->txq_memory) 867 return 0; 868 869 /* Unmap DMA from host system and free skb's */ 870 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 871 txq_id++) 872 iwl_pcie_txq_unmap(trans, txq_id); 873 874 return 0; 875 } 876 877 /* 878 * iwl_trans_tx_free - Free TXQ Context 879 * 880 * Destroy all TX DMA queues and structures 881 */ 882 void iwl_pcie_tx_free(struct iwl_trans *trans) 883 { 884 int txq_id; 885 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 886 887 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 888 889 /* Tx queues */ 890 if (trans_pcie->txq_memory) { 891 for (txq_id = 0; 892 txq_id < trans->cfg->base_params->num_of_queues; 893 txq_id++) { 894 iwl_pcie_txq_free(trans, txq_id); 895 trans_pcie->txq[txq_id] = NULL; 896 } 897 } 898 899 kfree(trans_pcie->txq_memory); 900 trans_pcie->txq_memory = NULL; 901 902 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 903 904 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); 905 } 906 907 /* 908 * iwl_pcie_tx_alloc - allocate TX context 909 * Allocate all Tx DMA structures and initialize them 910 */ 911 static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 912 { 913 int ret; 914 int txq_id, slots_num; 915 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 916 917 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * 918 sizeof(struct iwlagn_scd_bc_tbl); 919 920 /*It is not allowed to alloc twice, so warn when this happens. 921 * We cannot rely on the previous allocation, so free and fail */ 922 if (WARN_ON(trans_pcie->txq_memory)) { 923 ret = -EINVAL; 924 goto error; 925 } 926 927 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, 928 scd_bc_tbls_size); 929 if (ret) { 930 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 931 goto error; 932 } 933 934 /* Alloc keep-warm buffer */ 935 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 936 if (ret) { 937 IWL_ERR(trans, "Keep Warm allocation failed\n"); 938 goto error; 939 } 940 941 trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, 942 sizeof(struct iwl_txq), GFP_KERNEL); 943 if (!trans_pcie->txq_memory) { 944 IWL_ERR(trans, "Not enough memory for txq\n"); 945 ret = -ENOMEM; 946 goto error; 947 } 948 949 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 950 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 951 txq_id++) { 952 bool cmd_queue = (txq_id == trans_pcie->cmd_queue); 953 954 slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size : 955 TFD_TX_CMD_SLOTS; 956 trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; 957 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], 958 slots_num, cmd_queue); 959 if (ret) { 960 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 961 goto error; 962 } 963 trans_pcie->txq[txq_id]->id = txq_id; 964 } 965 966 return 0; 967 968 error: 969 iwl_pcie_tx_free(trans); 970 971 return ret; 972 } 973 974 void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans) 975 { 976 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 977 int queue_size = TFD_CMD_SLOTS; 978 979 if (trans->cfg->tx_cmd_queue_size) 980 queue_size = trans->cfg->tx_cmd_queue_size; 981 982 if (WARN_ON(!(is_power_of_2(queue_size) && 983 TFD_QUEUE_CB_SIZE(queue_size) > 0))) 984 trans_pcie->tx_cmd_queue_size = TFD_CMD_SLOTS; 985 else 986 trans_pcie->tx_cmd_queue_size = queue_size; 987 } 988 989 int iwl_pcie_tx_init(struct iwl_trans *trans) 990 { 991 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 992 int ret; 993 int txq_id, slots_num; 994 bool alloc = false; 995 996 iwl_pcie_set_tx_cmd_queue_size(trans); 997 998 if (!trans_pcie->txq_memory) { 999 ret = iwl_pcie_tx_alloc(trans); 1000 if (ret) 1001 goto error; 1002 alloc = true; 1003 } 1004 1005 spin_lock(&trans_pcie->irq_lock); 1006 1007 /* Turn off all Tx DMA fifos */ 1008 iwl_scd_deactivate_fifos(trans); 1009 1010 /* Tell NIC where to find the "keep warm" buffer */ 1011 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 1012 trans_pcie->kw.dma >> 4); 1013 1014 spin_unlock(&trans_pcie->irq_lock); 1015 1016 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 1017 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 1018 txq_id++) { 1019 bool cmd_queue = (txq_id == trans_pcie->cmd_queue); 1020 1021 slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size : 1022 TFD_TX_CMD_SLOTS; 1023 ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], 1024 slots_num, cmd_queue); 1025 if (ret) { 1026 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 1027 goto error; 1028 } 1029 1030 /* 1031 * Tell nic where to find circular buffer of TFDs for a 1032 * given Tx queue, and enable the DMA channel used for that 1033 * queue. 1034 * Circular buffer (TFD queue in DRAM) physical base address 1035 */ 1036 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), 1037 trans_pcie->txq[txq_id]->dma_addr >> 8); 1038 } 1039 1040 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); 1041 if (trans->cfg->base_params->num_of_queues > 20) 1042 iwl_set_bits_prph(trans, SCD_GP_CTRL, 1043 SCD_GP_CTRL_ENABLE_31_QUEUES); 1044 1045 return 0; 1046 error: 1047 /*Upon error, free only if we allocated something */ 1048 if (alloc) 1049 iwl_pcie_tx_free(trans); 1050 return ret; 1051 } 1052 1053 static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 1054 { 1055 lockdep_assert_held(&txq->lock); 1056 1057 if (!txq->wd_timeout) 1058 return; 1059 1060 /* 1061 * station is asleep and we send data - that must 1062 * be uAPSD or PS-Poll. Don't rearm the timer. 1063 */ 1064 if (txq->frozen) 1065 return; 1066 1067 /* 1068 * if empty delete timer, otherwise move timer forward 1069 * since we're making progress on this queue 1070 */ 1071 if (txq->read_ptr == txq->write_ptr) 1072 del_timer(&txq->stuck_timer); 1073 else 1074 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1075 } 1076 1077 /* Frees buffers until index _not_ inclusive */ 1078 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 1079 struct sk_buff_head *skbs) 1080 { 1081 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1082 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1083 int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); 1084 int last_to_free; 1085 1086 /* This function is not meant to release cmd queue*/ 1087 if (WARN_ON(txq_id == trans_pcie->cmd_queue)) 1088 return; 1089 1090 spin_lock_bh(&txq->lock); 1091 1092 if (!test_bit(txq_id, trans_pcie->queue_used)) { 1093 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 1094 txq_id, ssn); 1095 goto out; 1096 } 1097 1098 if (txq->read_ptr == tfd_num) 1099 goto out; 1100 1101 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 1102 txq_id, txq->read_ptr, tfd_num, ssn); 1103 1104 /*Since we free until index _not_ inclusive, the one before index is 1105 * the last we will free. This one must be used */ 1106 last_to_free = iwl_queue_dec_wrap(tfd_num); 1107 1108 if (!iwl_queue_used(txq, last_to_free)) { 1109 IWL_ERR(trans, 1110 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1111 __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, 1112 txq->write_ptr, txq->read_ptr); 1113 goto out; 1114 } 1115 1116 if (WARN_ON(!skb_queue_empty(skbs))) 1117 goto out; 1118 1119 for (; 1120 txq->read_ptr != tfd_num; 1121 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1122 int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); 1123 struct sk_buff *skb = txq->entries[idx].skb; 1124 1125 if (WARN_ON_ONCE(!skb)) 1126 continue; 1127 1128 iwl_pcie_free_tso_page(trans_pcie, skb); 1129 1130 __skb_queue_tail(skbs, skb); 1131 1132 txq->entries[idx].skb = NULL; 1133 1134 if (!trans->cfg->use_tfh) 1135 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 1136 1137 iwl_pcie_txq_free_tfd(trans, txq); 1138 } 1139 1140 iwl_pcie_txq_progress(txq); 1141 1142 if (iwl_queue_space(txq) > txq->low_mark && 1143 test_bit(txq_id, trans_pcie->queue_stopped)) { 1144 struct sk_buff_head overflow_skbs; 1145 1146 __skb_queue_head_init(&overflow_skbs); 1147 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); 1148 1149 /* 1150 * This is tricky: we are in reclaim path which is non 1151 * re-entrant, so noone will try to take the access the 1152 * txq data from that path. We stopped tx, so we can't 1153 * have tx as well. Bottom line, we can unlock and re-lock 1154 * later. 1155 */ 1156 spin_unlock_bh(&txq->lock); 1157 1158 while (!skb_queue_empty(&overflow_skbs)) { 1159 struct sk_buff *skb = __skb_dequeue(&overflow_skbs); 1160 struct iwl_device_cmd *dev_cmd_ptr; 1161 1162 dev_cmd_ptr = *(void **)((u8 *)skb->cb + 1163 trans_pcie->dev_cmd_offs); 1164 1165 /* 1166 * Note that we can very well be overflowing again. 1167 * In that case, iwl_queue_space will be small again 1168 * and we won't wake mac80211's queue. 1169 */ 1170 iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id); 1171 } 1172 spin_lock_bh(&txq->lock); 1173 1174 if (iwl_queue_space(txq) > txq->low_mark) 1175 iwl_wake_queue(trans, txq); 1176 } 1177 1178 if (txq->read_ptr == txq->write_ptr) { 1179 IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id); 1180 iwl_trans_unref(trans); 1181 } 1182 1183 out: 1184 spin_unlock_bh(&txq->lock); 1185 } 1186 1187 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, 1188 const struct iwl_host_cmd *cmd) 1189 { 1190 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1191 int ret; 1192 1193 lockdep_assert_held(&trans_pcie->reg_lock); 1194 1195 if (!(cmd->flags & CMD_SEND_IN_IDLE) && 1196 !trans_pcie->ref_cmd_in_flight) { 1197 trans_pcie->ref_cmd_in_flight = true; 1198 IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); 1199 iwl_trans_ref(trans); 1200 } 1201 1202 /* 1203 * wake up the NIC to make sure that the firmware will see the host 1204 * command - we will let the NIC sleep once all the host commands 1205 * returned. This needs to be done only on NICs that have 1206 * apmg_wake_up_wa set. 1207 */ 1208 if (trans->cfg->base_params->apmg_wake_up_wa && 1209 !trans_pcie->cmd_hold_nic_awake) { 1210 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1211 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1212 1213 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1214 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1215 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1216 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 1217 15000); 1218 if (ret < 0) { 1219 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1220 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1221 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1222 return -EIO; 1223 } 1224 trans_pcie->cmd_hold_nic_awake = true; 1225 } 1226 1227 return 0; 1228 } 1229 1230 /* 1231 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1232 * 1233 * When FW advances 'R' index, all entries between old and new 'R' index 1234 * need to be reclaimed. As result, some free space forms. If there is 1235 * enough free space (> low mark), wake the stack that feeds us. 1236 */ 1237 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1238 { 1239 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1240 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1241 unsigned long flags; 1242 int nfreed = 0; 1243 1244 lockdep_assert_held(&txq->lock); 1245 1246 if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { 1247 IWL_ERR(trans, 1248 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1249 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 1250 txq->write_ptr, txq->read_ptr); 1251 return; 1252 } 1253 1254 for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx; 1255 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1256 1257 if (nfreed++ > 0) { 1258 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1259 idx, txq->write_ptr, txq->read_ptr); 1260 iwl_force_nmi(trans); 1261 } 1262 } 1263 1264 if (txq->read_ptr == txq->write_ptr) { 1265 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1266 iwl_pcie_clear_cmd_in_flight(trans); 1267 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1268 } 1269 1270 iwl_pcie_txq_progress(txq); 1271 } 1272 1273 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1274 u16 txq_id) 1275 { 1276 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1277 u32 tbl_dw_addr; 1278 u32 tbl_dw; 1279 u16 scd_q2ratid; 1280 1281 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1282 1283 tbl_dw_addr = trans_pcie->scd_base_addr + 1284 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1285 1286 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1287 1288 if (txq_id & 0x1) 1289 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1290 else 1291 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1292 1293 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1294 1295 return 0; 1296 } 1297 1298 /* Receiver address (actually, Rx station's index into station table), 1299 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1300 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1301 1302 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1303 const struct iwl_trans_txq_scd_cfg *cfg, 1304 unsigned int wdg_timeout) 1305 { 1306 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1307 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1308 int fifo = -1; 1309 bool scd_bug = false; 1310 1311 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 1312 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1313 1314 txq->wd_timeout = msecs_to_jiffies(wdg_timeout); 1315 1316 if (cfg) { 1317 fifo = cfg->fifo; 1318 1319 /* Disable the scheduler prior configuring the cmd queue */ 1320 if (txq_id == trans_pcie->cmd_queue && 1321 trans_pcie->scd_set_active) 1322 iwl_scd_enable_set_active(trans, 0); 1323 1324 /* Stop this Tx queue before configuring it */ 1325 iwl_scd_txq_set_inactive(trans, txq_id); 1326 1327 /* Set this queue as a chain-building queue unless it is CMD */ 1328 if (txq_id != trans_pcie->cmd_queue) 1329 iwl_scd_txq_set_chain(trans, txq_id); 1330 1331 if (cfg->aggregate) { 1332 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); 1333 1334 /* Map receiver-address / traffic-ID to this queue */ 1335 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); 1336 1337 /* enable aggregations for the queue */ 1338 iwl_scd_txq_enable_agg(trans, txq_id); 1339 txq->ampdu = true; 1340 } else { 1341 /* 1342 * disable aggregations for the queue, this will also 1343 * make the ra_tid mapping configuration irrelevant 1344 * since it is now a non-AGG queue. 1345 */ 1346 iwl_scd_txq_disable_agg(trans, txq_id); 1347 1348 ssn = txq->read_ptr; 1349 } 1350 } else { 1351 /* 1352 * If we need to move the SCD write pointer by steps of 1353 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let 1354 * the op_mode know by returning true later. 1355 * Do this only in case cfg is NULL since this trick can 1356 * be done only if we have DQA enabled which is true for mvm 1357 * only. And mvm never sets a cfg pointer. 1358 * This is really ugly, but this is the easiest way out for 1359 * this sad hardware issue. 1360 * This bug has been fixed on devices 9000 and up. 1361 */ 1362 scd_bug = !trans->cfg->mq_rx_supported && 1363 !((ssn - txq->write_ptr) & 0x3f) && 1364 (ssn != txq->write_ptr); 1365 if (scd_bug) 1366 ssn++; 1367 } 1368 1369 /* Place first TFD at index corresponding to start sequence number. 1370 * Assumes that ssn_idx is valid (!= 0xFFF) */ 1371 txq->read_ptr = (ssn & 0xff); 1372 txq->write_ptr = (ssn & 0xff); 1373 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1374 (ssn & 0xff) | (txq_id << 8)); 1375 1376 if (cfg) { 1377 u8 frame_limit = cfg->frame_limit; 1378 1379 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1380 1381 /* Set up Tx window size and frame limit for this queue */ 1382 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1383 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1384 iwl_trans_write_mem32(trans, 1385 trans_pcie->scd_base_addr + 1386 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1387 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | 1388 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); 1389 1390 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1391 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1392 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1393 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1394 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1395 SCD_QUEUE_STTS_REG_MSK); 1396 1397 /* enable the scheduler for this queue (only) */ 1398 if (txq_id == trans_pcie->cmd_queue && 1399 trans_pcie->scd_set_active) 1400 iwl_scd_enable_set_active(trans, BIT(txq_id)); 1401 1402 IWL_DEBUG_TX_QUEUES(trans, 1403 "Activate queue %d on FIFO %d WrPtr: %d\n", 1404 txq_id, fifo, ssn & 0xff); 1405 } else { 1406 IWL_DEBUG_TX_QUEUES(trans, 1407 "Activate queue %d WrPtr: %d\n", 1408 txq_id, ssn & 0xff); 1409 } 1410 1411 return scd_bug; 1412 } 1413 1414 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, 1415 bool shared_mode) 1416 { 1417 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1418 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1419 1420 txq->ampdu = !shared_mode; 1421 } 1422 1423 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1424 bool configure_scd) 1425 { 1426 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1427 u32 stts_addr = trans_pcie->scd_base_addr + 1428 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1429 static const u32 zero_val[4] = {}; 1430 1431 trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; 1432 trans_pcie->txq[txq_id]->frozen = false; 1433 1434 /* 1435 * Upon HW Rfkill - we stop the device, and then stop the queues 1436 * in the op_mode. Just for the sake of the simplicity of the op_mode, 1437 * allow the op_mode to call txq_disable after it already called 1438 * stop_device. 1439 */ 1440 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 1441 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 1442 "queue %d not used", txq_id); 1443 return; 1444 } 1445 1446 if (configure_scd) { 1447 iwl_scd_txq_set_inactive(trans, txq_id); 1448 1449 iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, 1450 ARRAY_SIZE(zero_val)); 1451 } 1452 1453 iwl_pcie_txq_unmap(trans, txq_id); 1454 trans_pcie->txq[txq_id]->ampdu = false; 1455 1456 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1457 } 1458 1459 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1460 1461 /* 1462 * iwl_pcie_enqueue_hcmd - enqueue a uCode command 1463 * @priv: device private data point 1464 * @cmd: a pointer to the ucode command structure 1465 * 1466 * The function returns < 0 values to indicate the operation 1467 * failed. On success, it returns the index (>= 0) of command in the 1468 * command queue. 1469 */ 1470 static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1471 struct iwl_host_cmd *cmd) 1472 { 1473 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1474 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1475 struct iwl_device_cmd *out_cmd; 1476 struct iwl_cmd_meta *out_meta; 1477 unsigned long flags; 1478 void *dup_buf = NULL; 1479 dma_addr_t phys_addr; 1480 int idx; 1481 u16 copy_size, cmd_size, tb0_size; 1482 bool had_nocopy = false; 1483 u8 group_id = iwl_cmd_groupid(cmd->id); 1484 int i, ret; 1485 u32 cmd_pos; 1486 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1487 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1488 1489 if (WARN(!trans->wide_cmd_header && 1490 group_id > IWL_ALWAYS_LONG_GROUP, 1491 "unsupported wide command %#x\n", cmd->id)) 1492 return -EINVAL; 1493 1494 if (group_id != 0) { 1495 copy_size = sizeof(struct iwl_cmd_header_wide); 1496 cmd_size = sizeof(struct iwl_cmd_header_wide); 1497 } else { 1498 copy_size = sizeof(struct iwl_cmd_header); 1499 cmd_size = sizeof(struct iwl_cmd_header); 1500 } 1501 1502 /* need one for the header if the first is NOCOPY */ 1503 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1504 1505 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1506 cmddata[i] = cmd->data[i]; 1507 cmdlen[i] = cmd->len[i]; 1508 1509 if (!cmd->len[i]) 1510 continue; 1511 1512 /* need at least IWL_FIRST_TB_SIZE copied */ 1513 if (copy_size < IWL_FIRST_TB_SIZE) { 1514 int copy = IWL_FIRST_TB_SIZE - copy_size; 1515 1516 if (copy > cmdlen[i]) 1517 copy = cmdlen[i]; 1518 cmdlen[i] -= copy; 1519 cmddata[i] += copy; 1520 copy_size += copy; 1521 } 1522 1523 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1524 had_nocopy = true; 1525 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1526 idx = -EINVAL; 1527 goto free_dup_buf; 1528 } 1529 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1530 /* 1531 * This is also a chunk that isn't copied 1532 * to the static buffer so set had_nocopy. 1533 */ 1534 had_nocopy = true; 1535 1536 /* only allowed once */ 1537 if (WARN_ON(dup_buf)) { 1538 idx = -EINVAL; 1539 goto free_dup_buf; 1540 } 1541 1542 dup_buf = kmemdup(cmddata[i], cmdlen[i], 1543 GFP_ATOMIC); 1544 if (!dup_buf) 1545 return -ENOMEM; 1546 } else { 1547 /* NOCOPY must not be followed by normal! */ 1548 if (WARN_ON(had_nocopy)) { 1549 idx = -EINVAL; 1550 goto free_dup_buf; 1551 } 1552 copy_size += cmdlen[i]; 1553 } 1554 cmd_size += cmd->len[i]; 1555 } 1556 1557 /* 1558 * If any of the command structures end up being larger than 1559 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1560 * allocated into separate TFDs, then we will need to 1561 * increase the size of the buffers. 1562 */ 1563 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1564 "Command %s (%#x) is too large (%d bytes)\n", 1565 iwl_get_cmd_string(trans, cmd->id), 1566 cmd->id, copy_size)) { 1567 idx = -EINVAL; 1568 goto free_dup_buf; 1569 } 1570 1571 spin_lock_bh(&txq->lock); 1572 1573 if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1574 spin_unlock_bh(&txq->lock); 1575 1576 IWL_ERR(trans, "No space in command queue\n"); 1577 iwl_op_mode_cmd_queue_full(trans->op_mode); 1578 idx = -ENOSPC; 1579 goto free_dup_buf; 1580 } 1581 1582 idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 1583 out_cmd = txq->entries[idx].cmd; 1584 out_meta = &txq->entries[idx].meta; 1585 1586 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1587 if (cmd->flags & CMD_WANT_SKB) 1588 out_meta->source = cmd; 1589 1590 /* set up the header */ 1591 if (group_id != 0) { 1592 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1593 out_cmd->hdr_wide.group_id = group_id; 1594 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1595 out_cmd->hdr_wide.length = 1596 cpu_to_le16(cmd_size - 1597 sizeof(struct iwl_cmd_header_wide)); 1598 out_cmd->hdr_wide.reserved = 0; 1599 out_cmd->hdr_wide.sequence = 1600 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1601 INDEX_TO_SEQ(txq->write_ptr)); 1602 1603 cmd_pos = sizeof(struct iwl_cmd_header_wide); 1604 copy_size = sizeof(struct iwl_cmd_header_wide); 1605 } else { 1606 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1607 out_cmd->hdr.sequence = 1608 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1609 INDEX_TO_SEQ(txq->write_ptr)); 1610 out_cmd->hdr.group_id = 0; 1611 1612 cmd_pos = sizeof(struct iwl_cmd_header); 1613 copy_size = sizeof(struct iwl_cmd_header); 1614 } 1615 1616 /* and copy the data that needs to be copied */ 1617 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1618 int copy; 1619 1620 if (!cmd->len[i]) 1621 continue; 1622 1623 /* copy everything if not nocopy/dup */ 1624 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1625 IWL_HCMD_DFL_DUP))) { 1626 copy = cmd->len[i]; 1627 1628 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1629 cmd_pos += copy; 1630 copy_size += copy; 1631 continue; 1632 } 1633 1634 /* 1635 * Otherwise we need at least IWL_FIRST_TB_SIZE copied 1636 * in total (for bi-directional DMA), but copy up to what 1637 * we can fit into the payload for debug dump purposes. 1638 */ 1639 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1640 1641 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1642 cmd_pos += copy; 1643 1644 /* However, treat copy_size the proper way, we need it below */ 1645 if (copy_size < IWL_FIRST_TB_SIZE) { 1646 copy = IWL_FIRST_TB_SIZE - copy_size; 1647 1648 if (copy > cmd->len[i]) 1649 copy = cmd->len[i]; 1650 copy_size += copy; 1651 } 1652 } 1653 1654 IWL_DEBUG_HC(trans, 1655 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1656 iwl_get_cmd_string(trans, cmd->id), 1657 group_id, out_cmd->hdr.cmd, 1658 le16_to_cpu(out_cmd->hdr.sequence), 1659 cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); 1660 1661 /* start the TFD with the minimum copy bytes */ 1662 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 1663 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1664 iwl_pcie_txq_build_tfd(trans, txq, 1665 iwl_pcie_get_first_tb_dma(txq, idx), 1666 tb0_size, true); 1667 1668 /* map first command fragment, if any remains */ 1669 if (copy_size > tb0_size) { 1670 phys_addr = dma_map_single(trans->dev, 1671 ((u8 *)&out_cmd->hdr) + tb0_size, 1672 copy_size - tb0_size, 1673 DMA_TO_DEVICE); 1674 if (dma_mapping_error(trans->dev, phys_addr)) { 1675 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1676 txq->write_ptr); 1677 idx = -ENOMEM; 1678 goto out; 1679 } 1680 1681 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1682 copy_size - tb0_size, false); 1683 } 1684 1685 /* map the remaining (adjusted) nocopy/dup fragments */ 1686 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1687 const void *data = cmddata[i]; 1688 1689 if (!cmdlen[i]) 1690 continue; 1691 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1692 IWL_HCMD_DFL_DUP))) 1693 continue; 1694 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1695 data = dup_buf; 1696 phys_addr = dma_map_single(trans->dev, (void *)data, 1697 cmdlen[i], DMA_TO_DEVICE); 1698 if (dma_mapping_error(trans->dev, phys_addr)) { 1699 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1700 txq->write_ptr); 1701 idx = -ENOMEM; 1702 goto out; 1703 } 1704 1705 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1706 } 1707 1708 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); 1709 out_meta->flags = cmd->flags; 1710 if (WARN_ON_ONCE(txq->entries[idx].free_buf)) 1711 kzfree(txq->entries[idx].free_buf); 1712 txq->entries[idx].free_buf = dup_buf; 1713 1714 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); 1715 1716 /* start timer if queue currently empty */ 1717 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 1718 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1719 1720 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1721 ret = iwl_pcie_set_cmd_in_flight(trans, cmd); 1722 if (ret < 0) { 1723 idx = ret; 1724 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1725 goto out; 1726 } 1727 1728 /* Increment and update queue's write index */ 1729 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 1730 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1731 1732 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1733 1734 out: 1735 spin_unlock_bh(&txq->lock); 1736 free_dup_buf: 1737 if (idx < 0) 1738 kfree(dup_buf); 1739 return idx; 1740 } 1741 1742 /* 1743 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1744 * @rxb: Rx buffer to reclaim 1745 */ 1746 void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 1747 struct iwl_rx_cmd_buffer *rxb) 1748 { 1749 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1750 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1751 u8 group_id; 1752 u32 cmd_id; 1753 int txq_id = SEQ_TO_QUEUE(sequence); 1754 int index = SEQ_TO_INDEX(sequence); 1755 int cmd_index; 1756 struct iwl_device_cmd *cmd; 1757 struct iwl_cmd_meta *meta; 1758 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1759 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1760 1761 /* If a Tx command is being handled and it isn't in the actual 1762 * command queue then there a command routing bug has been introduced 1763 * in the queue management code. */ 1764 if (WARN(txq_id != trans_pcie->cmd_queue, 1765 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 1766 txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, 1767 txq->write_ptr)) { 1768 iwl_print_hex_error(trans, pkt, 32); 1769 return; 1770 } 1771 1772 spin_lock_bh(&txq->lock); 1773 1774 cmd_index = iwl_pcie_get_cmd_index(txq, index); 1775 cmd = txq->entries[cmd_index].cmd; 1776 meta = &txq->entries[cmd_index].meta; 1777 group_id = cmd->hdr.group_id; 1778 cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); 1779 1780 iwl_pcie_tfd_unmap(trans, meta, txq, index); 1781 1782 /* Input error checking is done when commands are added to queue. */ 1783 if (meta->flags & CMD_WANT_SKB) { 1784 struct page *p = rxb_steal_page(rxb); 1785 1786 meta->source->resp_pkt = pkt; 1787 meta->source->_rx_page_addr = (unsigned long)page_address(p); 1788 meta->source->_rx_page_order = trans_pcie->rx_page_order; 1789 } 1790 1791 if (meta->flags & CMD_WANT_ASYNC_CALLBACK) 1792 iwl_op_mode_async_cb(trans->op_mode, cmd); 1793 1794 iwl_pcie_cmdq_reclaim(trans, txq_id, index); 1795 1796 if (!(meta->flags & CMD_ASYNC)) { 1797 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { 1798 IWL_WARN(trans, 1799 "HCMD_ACTIVE already clear for command %s\n", 1800 iwl_get_cmd_string(trans, cmd_id)); 1801 } 1802 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1803 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1804 iwl_get_cmd_string(trans, cmd_id)); 1805 wake_up(&trans_pcie->wait_command_queue); 1806 } 1807 1808 if (meta->flags & CMD_MAKE_TRANS_IDLE) { 1809 IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", 1810 iwl_get_cmd_string(trans, cmd->hdr.cmd)); 1811 set_bit(STATUS_TRANS_IDLE, &trans->status); 1812 wake_up(&trans_pcie->d0i3_waitq); 1813 } 1814 1815 if (meta->flags & CMD_WAKE_UP_TRANS) { 1816 IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", 1817 iwl_get_cmd_string(trans, cmd->hdr.cmd)); 1818 clear_bit(STATUS_TRANS_IDLE, &trans->status); 1819 wake_up(&trans_pcie->d0i3_waitq); 1820 } 1821 1822 meta->flags = 0; 1823 1824 spin_unlock_bh(&txq->lock); 1825 } 1826 1827 #define HOST_COMPLETE_TIMEOUT (2 * HZ) 1828 1829 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, 1830 struct iwl_host_cmd *cmd) 1831 { 1832 int ret; 1833 1834 /* An asynchronous command can not expect an SKB to be set. */ 1835 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1836 return -EINVAL; 1837 1838 ret = iwl_pcie_enqueue_hcmd(trans, cmd); 1839 if (ret < 0) { 1840 IWL_ERR(trans, 1841 "Error sending %s: enqueue_hcmd failed: %d\n", 1842 iwl_get_cmd_string(trans, cmd->id), ret); 1843 return ret; 1844 } 1845 return 0; 1846 } 1847 1848 static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, 1849 struct iwl_host_cmd *cmd) 1850 { 1851 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1852 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1853 int cmd_idx; 1854 int ret; 1855 1856 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 1857 iwl_get_cmd_string(trans, cmd->id)); 1858 1859 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1860 &trans->status), 1861 "Command %s: a command is already active!\n", 1862 iwl_get_cmd_string(trans, cmd->id))) 1863 return -EIO; 1864 1865 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 1866 iwl_get_cmd_string(trans, cmd->id)); 1867 1868 if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { 1869 ret = wait_event_timeout(trans_pcie->d0i3_waitq, 1870 pm_runtime_active(&trans_pcie->pci_dev->dev), 1871 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); 1872 if (!ret) { 1873 IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); 1874 return -ETIMEDOUT; 1875 } 1876 } 1877 1878 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 1879 if (cmd_idx < 0) { 1880 ret = cmd_idx; 1881 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1882 IWL_ERR(trans, 1883 "Error sending %s: enqueue_hcmd failed: %d\n", 1884 iwl_get_cmd_string(trans, cmd->id), ret); 1885 return ret; 1886 } 1887 1888 ret = wait_event_timeout(trans_pcie->wait_command_queue, 1889 !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1890 &trans->status), 1891 HOST_COMPLETE_TIMEOUT); 1892 if (!ret) { 1893 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 1894 iwl_get_cmd_string(trans, cmd->id), 1895 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1896 1897 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 1898 txq->read_ptr, txq->write_ptr); 1899 1900 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1901 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1902 iwl_get_cmd_string(trans, cmd->id)); 1903 ret = -ETIMEDOUT; 1904 1905 iwl_force_nmi(trans); 1906 iwl_trans_fw_error(trans); 1907 1908 goto cancel; 1909 } 1910 1911 if (test_bit(STATUS_FW_ERROR, &trans->status)) { 1912 IWL_ERR(trans, "FW error in SYNC CMD %s\n", 1913 iwl_get_cmd_string(trans, cmd->id)); 1914 dump_stack(); 1915 ret = -EIO; 1916 goto cancel; 1917 } 1918 1919 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1920 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 1921 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1922 ret = -ERFKILL; 1923 goto cancel; 1924 } 1925 1926 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1927 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 1928 iwl_get_cmd_string(trans, cmd->id)); 1929 ret = -EIO; 1930 goto cancel; 1931 } 1932 1933 return 0; 1934 1935 cancel: 1936 if (cmd->flags & CMD_WANT_SKB) { 1937 /* 1938 * Cancel the CMD_WANT_SKB flag for the cmd in the 1939 * TX cmd queue. Otherwise in case the cmd comes 1940 * in later, it will possibly set an invalid 1941 * address (cmd->meta.source). 1942 */ 1943 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 1944 } 1945 1946 if (cmd->resp_pkt) { 1947 iwl_free_resp(cmd); 1948 cmd->resp_pkt = NULL; 1949 } 1950 1951 return ret; 1952 } 1953 1954 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1955 { 1956 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1957 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 1958 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 1959 cmd->id); 1960 return -ERFKILL; 1961 } 1962 1963 if (cmd->flags & CMD_ASYNC) 1964 return iwl_pcie_send_hcmd_async(trans, cmd); 1965 1966 /* We still can fail on RFKILL that can be asserted while we wait */ 1967 return iwl_pcie_send_hcmd_sync(trans, cmd); 1968 } 1969 1970 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, 1971 struct iwl_txq *txq, u8 hdr_len, 1972 struct iwl_cmd_meta *out_meta, 1973 struct iwl_device_cmd *dev_cmd, u16 tb1_len) 1974 { 1975 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1976 u16 tb2_len; 1977 int i; 1978 1979 /* 1980 * Set up TFD's third entry to point directly to remainder 1981 * of skb's head, if any 1982 */ 1983 tb2_len = skb_headlen(skb) - hdr_len; 1984 1985 if (tb2_len > 0) { 1986 dma_addr_t tb2_phys = dma_map_single(trans->dev, 1987 skb->data + hdr_len, 1988 tb2_len, DMA_TO_DEVICE); 1989 if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { 1990 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1991 txq->write_ptr); 1992 return -EINVAL; 1993 } 1994 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); 1995 } 1996 1997 /* set up the remaining entries to point to the data */ 1998 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1999 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2000 dma_addr_t tb_phys; 2001 int tb_idx; 2002 2003 if (!skb_frag_size(frag)) 2004 continue; 2005 2006 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 2007 skb_frag_size(frag), DMA_TO_DEVICE); 2008 2009 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 2010 iwl_pcie_tfd_unmap(trans, out_meta, txq, 2011 txq->write_ptr); 2012 return -EINVAL; 2013 } 2014 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 2015 skb_frag_size(frag), false); 2016 2017 out_meta->tbs |= BIT(tb_idx); 2018 } 2019 2020 trace_iwlwifi_dev_tx(trans->dev, skb, 2021 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2022 trans_pcie->tfd_size, 2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2024 hdr_len); 2025 trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); 2026 return 0; 2027 } 2028 2029 #ifdef CONFIG_INET 2030 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) 2031 { 2032 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2033 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); 2034 2035 if (!p->page) 2036 goto alloc; 2037 2038 /* enough room on this page */ 2039 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) 2040 return p; 2041 2042 /* We don't have enough room on this page, get a new one. */ 2043 __free_page(p->page); 2044 2045 alloc: 2046 p->page = alloc_page(GFP_ATOMIC); 2047 if (!p->page) 2048 return NULL; 2049 p->pos = page_address(p->page); 2050 return p; 2051 } 2052 2053 static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, 2054 bool ipv6, unsigned int len) 2055 { 2056 if (ipv6) { 2057 struct ipv6hdr *iphv6 = iph; 2058 2059 tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr, 2060 len + tcph->doff * 4, 2061 IPPROTO_TCP, 0); 2062 } else { 2063 struct iphdr *iphv4 = iph; 2064 2065 ip_send_check(iphv4); 2066 tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr, 2067 len + tcph->doff * 4, 2068 IPPROTO_TCP, 0); 2069 } 2070 } 2071 2072 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 2073 struct iwl_txq *txq, u8 hdr_len, 2074 struct iwl_cmd_meta *out_meta, 2075 struct iwl_device_cmd *dev_cmd, u16 tb1_len) 2076 { 2077 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 2078 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 2079 struct ieee80211_hdr *hdr = (void *)skb->data; 2080 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 2081 unsigned int mss = skb_shinfo(skb)->gso_size; 2082 u16 length, iv_len, amsdu_pad; 2083 u8 *start_hdr; 2084 struct iwl_tso_hdr_page *hdr_page; 2085 struct page **page_ptr; 2086 int ret; 2087 struct tso_t tso; 2088 2089 /* if the packet is protected, then it must be CCMP or GCMP */ 2090 BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); 2091 iv_len = ieee80211_has_protected(hdr->frame_control) ? 2092 IEEE80211_CCMP_HDR_LEN : 0; 2093 2094 trace_iwlwifi_dev_tx(trans->dev, skb, 2095 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2096 trans_pcie->tfd_size, 2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 2098 2099 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 2100 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 2101 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; 2102 amsdu_pad = 0; 2103 2104 /* total amount of header we may need for this A-MSDU */ 2105 hdr_room = DIV_ROUND_UP(total_len, mss) * 2106 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; 2107 2108 /* Our device supports 9 segments at most, it will fit in 1 page */ 2109 hdr_page = get_page_hdr(trans, hdr_room); 2110 if (!hdr_page) 2111 return -ENOMEM; 2112 2113 get_page(hdr_page->page); 2114 start_hdr = hdr_page->pos; 2115 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); 2116 *page_ptr = hdr_page->page; 2117 memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); 2118 hdr_page->pos += iv_len; 2119 2120 /* 2121 * Pull the ieee80211 header + IV to be able to use TSO core, 2122 * we will restore it for the tx_status flow. 2123 */ 2124 skb_pull(skb, hdr_len + iv_len); 2125 2126 /* 2127 * Remove the length of all the headers that we don't actually 2128 * have in the MPDU by themselves, but that we duplicate into 2129 * all the different MSDUs inside the A-MSDU. 2130 */ 2131 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 2132 2133 tso_start(skb, &tso); 2134 2135 while (total_len) { 2136 /* this is the data left for this subframe */ 2137 unsigned int data_left = 2138 min_t(unsigned int, mss, total_len); 2139 struct sk_buff *csum_skb = NULL; 2140 unsigned int hdr_tb_len; 2141 dma_addr_t hdr_tb_phys; 2142 struct tcphdr *tcph; 2143 u8 *iph, *subf_hdrs_start = hdr_page->pos; 2144 2145 total_len -= data_left; 2146 2147 memset(hdr_page->pos, 0, amsdu_pad); 2148 hdr_page->pos += amsdu_pad; 2149 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 2150 data_left)) & 0x3; 2151 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 2152 hdr_page->pos += ETH_ALEN; 2153 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 2154 hdr_page->pos += ETH_ALEN; 2155 2156 length = snap_ip_tcp_hdrlen + data_left; 2157 *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 2158 hdr_page->pos += sizeof(length); 2159 2160 /* 2161 * This will copy the SNAP as well which will be considered 2162 * as MAC header. 2163 */ 2164 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 2165 iph = hdr_page->pos + 8; 2166 tcph = (void *)(iph + ip_hdrlen); 2167 2168 /* For testing on current hardware only */ 2169 if (trans_pcie->sw_csum_tx) { 2170 csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), 2171 GFP_ATOMIC); 2172 if (!csum_skb) { 2173 ret = -ENOMEM; 2174 goto out_unmap; 2175 } 2176 2177 iwl_compute_pseudo_hdr_csum(iph, tcph, 2178 skb->protocol == 2179 htons(ETH_P_IPV6), 2180 data_left); 2181 2182 skb_put_data(csum_skb, tcph, tcp_hdrlen(skb)); 2183 skb_reset_transport_header(csum_skb); 2184 csum_skb->csum_start = 2185 (unsigned char *)tcp_hdr(csum_skb) - 2186 csum_skb->head; 2187 } 2188 2189 hdr_page->pos += snap_ip_tcp_hdrlen; 2190 2191 hdr_tb_len = hdr_page->pos - start_hdr; 2192 hdr_tb_phys = dma_map_single(trans->dev, start_hdr, 2193 hdr_tb_len, DMA_TO_DEVICE); 2194 if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { 2195 dev_kfree_skb(csum_skb); 2196 ret = -EINVAL; 2197 goto out_unmap; 2198 } 2199 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 2200 hdr_tb_len, false); 2201 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, 2202 hdr_tb_len); 2203 /* add this subframe's headers' length to the tx_cmd */ 2204 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 2205 2206 /* prepare the start_hdr for the next subframe */ 2207 start_hdr = hdr_page->pos; 2208 2209 /* put the payload */ 2210 while (data_left) { 2211 unsigned int size = min_t(unsigned int, tso.size, 2212 data_left); 2213 dma_addr_t tb_phys; 2214 2215 if (trans_pcie->sw_csum_tx) 2216 skb_put_data(csum_skb, tso.data, size); 2217 2218 tb_phys = dma_map_single(trans->dev, tso.data, 2219 size, DMA_TO_DEVICE); 2220 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 2221 dev_kfree_skb(csum_skb); 2222 ret = -EINVAL; 2223 goto out_unmap; 2224 } 2225 2226 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 2227 size, false); 2228 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, 2229 size); 2230 2231 data_left -= size; 2232 tso_build_data(skb, &tso, size); 2233 } 2234 2235 /* For testing on early hardware only */ 2236 if (trans_pcie->sw_csum_tx) { 2237 __wsum csum; 2238 2239 csum = skb_checksum(csum_skb, 2240 skb_checksum_start_offset(csum_skb), 2241 csum_skb->len - 2242 skb_checksum_start_offset(csum_skb), 2243 0); 2244 dev_kfree_skb(csum_skb); 2245 dma_sync_single_for_cpu(trans->dev, hdr_tb_phys, 2246 hdr_tb_len, DMA_TO_DEVICE); 2247 tcph->check = csum_fold(csum); 2248 dma_sync_single_for_device(trans->dev, hdr_tb_phys, 2249 hdr_tb_len, DMA_TO_DEVICE); 2250 } 2251 } 2252 2253 /* re -add the WiFi header and IV */ 2254 skb_push(skb, hdr_len + iv_len); 2255 2256 return 0; 2257 2258 out_unmap: 2259 iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); 2260 return ret; 2261 } 2262 #else /* CONFIG_INET */ 2263 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 2264 struct iwl_txq *txq, u8 hdr_len, 2265 struct iwl_cmd_meta *out_meta, 2266 struct iwl_device_cmd *dev_cmd, u16 tb1_len) 2267 { 2268 /* No A-MSDU without CONFIG_INET */ 2269 WARN_ON(1); 2270 2271 return -1; 2272 } 2273 #endif /* CONFIG_INET */ 2274 2275 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 2276 struct iwl_device_cmd *dev_cmd, int txq_id) 2277 { 2278 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2279 struct ieee80211_hdr *hdr; 2280 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; 2281 struct iwl_cmd_meta *out_meta; 2282 struct iwl_txq *txq; 2283 dma_addr_t tb0_phys, tb1_phys, scratch_phys; 2284 void *tb1_addr; 2285 void *tfd; 2286 u16 len, tb1_len; 2287 bool wait_write_ptr; 2288 __le16 fc; 2289 u8 hdr_len; 2290 u16 wifi_seq; 2291 bool amsdu; 2292 2293 txq = trans_pcie->txq[txq_id]; 2294 2295 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), 2296 "TX on unused queue %d\n", txq_id)) 2297 return -EINVAL; 2298 2299 if (unlikely(trans_pcie->sw_csum_tx && 2300 skb->ip_summed == CHECKSUM_PARTIAL)) { 2301 int offs = skb_checksum_start_offset(skb); 2302 int csum_offs = offs + skb->csum_offset; 2303 __wsum csum; 2304 2305 if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16))) 2306 return -1; 2307 2308 csum = skb_checksum(skb, offs, skb->len - offs, 0); 2309 *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); 2310 2311 skb->ip_summed = CHECKSUM_UNNECESSARY; 2312 } 2313 2314 if (skb_is_nonlinear(skb) && 2315 skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && 2316 __skb_linearize(skb)) 2317 return -ENOMEM; 2318 2319 /* mac80211 always puts the full header into the SKB's head, 2320 * so there's no need to check if it's readable there 2321 */ 2322 hdr = (struct ieee80211_hdr *)skb->data; 2323 fc = hdr->frame_control; 2324 hdr_len = ieee80211_hdrlen(fc); 2325 2326 spin_lock(&txq->lock); 2327 2328 if (iwl_queue_space(txq) < txq->high_mark) { 2329 iwl_stop_queue(trans, txq); 2330 2331 /* don't put the packet on the ring, if there is no room */ 2332 if (unlikely(iwl_queue_space(txq) < 3)) { 2333 struct iwl_device_cmd **dev_cmd_ptr; 2334 2335 dev_cmd_ptr = (void *)((u8 *)skb->cb + 2336 trans_pcie->dev_cmd_offs); 2337 2338 *dev_cmd_ptr = dev_cmd; 2339 __skb_queue_tail(&txq->overflow_q, skb); 2340 2341 spin_unlock(&txq->lock); 2342 return 0; 2343 } 2344 } 2345 2346 /* In AGG mode, the index in the ring must correspond to the WiFi 2347 * sequence number. This is a HW requirements to help the SCD to parse 2348 * the BA. 2349 * Check here that the packets are in the right place on the ring. 2350 */ 2351 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 2352 WARN_ONCE(txq->ampdu && 2353 (wifi_seq & 0xff) != txq->write_ptr, 2354 "Q: %d WiFi Seq %d tfdNum %d", 2355 txq_id, wifi_seq, txq->write_ptr); 2356 2357 /* Set up driver data for this TFD */ 2358 txq->entries[txq->write_ptr].skb = skb; 2359 txq->entries[txq->write_ptr].cmd = dev_cmd; 2360 2361 dev_cmd->hdr.sequence = 2362 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 2363 INDEX_TO_SEQ(txq->write_ptr))); 2364 2365 tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); 2366 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + 2367 offsetof(struct iwl_tx_cmd, scratch); 2368 2369 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 2370 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 2371 2372 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 2373 out_meta = &txq->entries[txq->write_ptr].meta; 2374 out_meta->flags = 0; 2375 2376 /* 2377 * The second TB (tb1) points to the remainder of the TX command 2378 * and the 802.11 header - dword aligned size 2379 * (This calculation modifies the TX command, so do it before the 2380 * setup of the first TB) 2381 */ 2382 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + 2383 hdr_len - IWL_FIRST_TB_SIZE; 2384 /* do not align A-MSDU to dword as the subframe header aligns it */ 2385 amsdu = ieee80211_is_data_qos(fc) && 2386 (*ieee80211_get_qos_ctl(hdr) & 2387 IEEE80211_QOS_CTL_A_MSDU_PRESENT); 2388 if (trans_pcie->sw_csum_tx || !amsdu) { 2389 tb1_len = ALIGN(len, 4); 2390 /* Tell NIC about any 2-byte padding after MAC header */ 2391 if (tb1_len != len) 2392 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); 2393 } else { 2394 tb1_len = len; 2395 } 2396 2397 /* 2398 * The first TB points to bi-directional DMA data, we'll 2399 * memcpy the data into it later. 2400 */ 2401 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 2402 IWL_FIRST_TB_SIZE, true); 2403 2404 /* there must be data left over for TB1 or this code must be changed */ 2405 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); 2406 2407 /* map the data for TB1 */ 2408 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 2409 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 2410 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 2411 goto out_err; 2412 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 2413 2414 if (amsdu) { 2415 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, 2416 out_meta, dev_cmd, 2417 tb1_len))) 2418 goto out_err; 2419 } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, 2420 out_meta, dev_cmd, tb1_len))) { 2421 goto out_err; 2422 } 2423 2424 /* building the A-MSDU might have changed this data, so memcpy it now */ 2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, 2426 IWL_FIRST_TB_SIZE); 2427 2428 tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); 2429 /* Set up entry for this TFD in Tx byte-count array */ 2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 2431 iwl_pcie_tfd_get_num_tbs(trans, tfd)); 2432 2433 wait_write_ptr = ieee80211_has_morefrags(fc); 2434 2435 /* start timer if queue currently empty */ 2436 if (txq->read_ptr == txq->write_ptr) { 2437 if (txq->wd_timeout) { 2438 /* 2439 * If the TXQ is active, then set the timer, if not, 2440 * set the timer in remainder so that the timer will 2441 * be armed with the right value when the station will 2442 * wake up. 2443 */ 2444 if (!txq->frozen) 2445 mod_timer(&txq->stuck_timer, 2446 jiffies + txq->wd_timeout); 2447 else 2448 txq->frozen_expiry_remainder = txq->wd_timeout; 2449 } 2450 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id); 2451 iwl_trans_ref(trans); 2452 } 2453 2454 /* Tell device the write index *just past* this latest filled TFD */ 2455 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 2456 if (!wait_write_ptr) 2457 iwl_pcie_txq_inc_wr_ptr(trans, txq); 2458 2459 /* 2460 * At this point the frame is "transmitted" successfully 2461 * and we will get a TX status notification eventually. 2462 */ 2463 spin_unlock(&txq->lock); 2464 return 0; 2465 out_err: 2466 spin_unlock(&txq->lock); 2467 return -1; 2468 } 2469