/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * Each Tx queue has low-mark and high-mark limits. If, after queuing a
 * packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
 * space rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/

static int iwl_queue_space(const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
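	 *
	 * Illustrative example (the numbers are hypothetical, not taken from
	 * this file): if TFD_QUEUE_SIZE_MAX were 256 and n_window were also
	 * 256, max would be clamped to 255 so that one slot always stays
	 * empty; with write_ptr = 5 and read_ptr = 250, the masked
	 * subtraction below gives (5 - 250) & 255 = 11 entries in use,
	 * i.e. 244 free.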
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
		jiffies_to_msecs(txq->wd_timeout));

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt,
					     int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	if (trans->cfg->use_tfh) {
		u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
				     num_tbs * sizeof(struct iwl_tfh_tb);
		/*
		 * filled_tfd_size contains the number of filled bytes in the
		 * TFD.
		 * Dividing it by 64 will give the number of chunks to fetch
		 * to SRAM - 0 for one chunk, 1 for 2 and so on.
		 * If, for example, TFD contains only 3 TBs then 32 bytes
		 * of the TFD are used, and only one chunk of 64 bytes should
		 * be fetched
		 */
		u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	} else {
		u8 sta_id = tx_cmd->sta_id;

		bc_ent = cpu_to_le16(len | (sta_id << 12));
	}

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
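	 *
	 * Note on the encoding used below: the value written to
	 * HBUS_TARG_WRPTR packs the ring index into bits 0-7 and the
	 * queue id into bits 8 and up; for example, write_ptr 42 on
	 * queue 3 is written as (42 | (3 << 8)) = 0x32a.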
309 */ 310 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); 311 if (!txq->block) 312 iwl_write32(trans, HBUS_TARG_WRPTR, 313 txq->write_ptr | (txq_id << 8)); 314 } 315 316 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 317 { 318 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 319 int i; 320 321 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 322 struct iwl_txq *txq = &trans_pcie->txq[i]; 323 324 spin_lock_bh(&txq->lock); 325 if (trans_pcie->txq[i].need_update) { 326 iwl_pcie_txq_inc_wr_ptr(trans, txq); 327 trans_pcie->txq[i].need_update = false; 328 } 329 spin_unlock_bh(&txq->lock); 330 } 331 } 332 333 static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, 334 struct iwl_txq *txq, int idx) 335 { 336 return txq->tfds + trans_pcie->tfd_size * idx; 337 } 338 339 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, 340 void *_tfd, u8 idx) 341 { 342 343 if (trans->cfg->use_tfh) { 344 struct iwl_tfh_tfd *tfd = _tfd; 345 struct iwl_tfh_tb *tb = &tfd->tbs[idx]; 346 347 return (dma_addr_t)(le64_to_cpu(tb->addr)); 348 } else { 349 struct iwl_tfd *tfd = _tfd; 350 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 351 dma_addr_t addr = get_unaligned_le32(&tb->lo); 352 dma_addr_t hi_len; 353 354 if (sizeof(dma_addr_t) <= sizeof(u32)) 355 return addr; 356 357 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 358 359 /* 360 * shift by 16 twice to avoid warnings on 32-bit 361 * (where this code never runs anyway due to the 362 * if statement above) 363 */ 364 return addr | ((hi_len << 16) << 16); 365 } 366 } 367 368 static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, 369 u8 idx, dma_addr_t addr, u16 len) 370 { 371 if (trans->cfg->use_tfh) { 372 struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 373 struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx]; 374 375 put_unaligned_le64(addr, &tb->addr); 376 tb->tb_len = cpu_to_le16(len); 377 378 tfd_fh->num_tbs = cpu_to_le16(idx + 1); 379 } else { 380 struct iwl_tfd *tfd_fh = (void *)tfd; 381 struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; 382 383 u16 hi_n_len = len << 4; 384 385 put_unaligned_le32(addr, &tb->lo); 386 if (sizeof(dma_addr_t) > sizeof(u32)) 387 hi_n_len |= ((addr >> 16) >> 16) & 0xF; 388 389 tb->hi_n_len = cpu_to_le16(hi_n_len); 390 391 tfd_fh->num_tbs = idx + 1; 392 } 393 } 394 395 static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) 396 { 397 if (trans->cfg->use_tfh) { 398 struct iwl_tfh_tfd *tfd = _tfd; 399 400 return le16_to_cpu(tfd->num_tbs) & 0x1f; 401 } else { 402 struct iwl_tfd *tfd = _tfd; 403 404 return tfd->num_tbs & 0x1f; 405 } 406 } 407 408 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 409 struct iwl_cmd_meta *meta, 410 struct iwl_txq *txq, int index) 411 { 412 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 413 int i, num_tbs; 414 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 415 416 /* Sanity check on number of chunks */ 417 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 418 419 if (num_tbs >= trans_pcie->max_tbs) { 420 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 421 /* @todo issue fatal error, it is quite serious situation */ 422 return; 423 } 424 425 /* first TB is never freed - it's the bidirectional DMA data */ 426 427 for (i = 1; i < num_tbs; i++) { 428 if (meta->tbs & BIT(i)) 429 dma_unmap_page(trans->dev, 430 iwl_pcie_tfd_tb_get_addr(trans, tfd, i), 431 iwl_pcie_tfd_tb_get_len(trans, tfd, i), 432 DMA_TO_DEVICE); 433 else 434 dma_unmap_single(trans->dev, 435 
iwl_pcie_tfd_tb_get_addr(trans, tfd, 436 i), 437 iwl_pcie_tfd_tb_get_len(trans, tfd, 438 i), 439 DMA_TO_DEVICE); 440 } 441 442 if (trans->cfg->use_tfh) { 443 struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 444 445 tfd_fh->num_tbs = 0; 446 } else { 447 struct iwl_tfd *tfd_fh = (void *)tfd; 448 449 tfd_fh->num_tbs = 0; 450 } 451 452 } 453 454 /* 455 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 456 * @trans - transport private data 457 * @txq - tx queue 458 * @dma_dir - the direction of the DMA mapping 459 * 460 * Does NOT advance any TFD circular buffer read/write indexes 461 * Does NOT free the TFD itself (which is within circular buffer) 462 */ 463 static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 464 { 465 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 466 * idx is bounded by n_window 467 */ 468 int rd_ptr = txq->read_ptr; 469 int idx = get_cmd_index(txq, rd_ptr); 470 471 lockdep_assert_held(&txq->lock); 472 473 /* We have only q->n_window txq->entries, but we use 474 * TFD_QUEUE_SIZE_MAX tfds 475 */ 476 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); 477 478 /* free SKB */ 479 if (txq->entries) { 480 struct sk_buff *skb; 481 482 skb = txq->entries[idx].skb; 483 484 /* Can be called from irqs-disabled context 485 * If skb is not NULL, it means that the whole queue is being 486 * freed and that the queue is not empty - free the skb 487 */ 488 if (skb) { 489 iwl_op_mode_free_skb(trans->op_mode, skb); 490 txq->entries[idx].skb = NULL; 491 } 492 } 493 } 494 495 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 496 dma_addr_t addr, u16 len, bool reset) 497 { 498 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 499 void *tfd; 500 u32 num_tbs; 501 502 tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; 503 504 if (reset) 505 memset(tfd, 0, trans_pcie->tfd_size); 506 507 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 508 509 /* Each TFD can point to a maximum max_tbs Tx buffers */ 510 if (num_tbs >= trans_pcie->max_tbs) { 511 IWL_ERR(trans, "Error can not send more than %d chunks\n", 512 trans_pcie->max_tbs); 513 return -EINVAL; 514 } 515 516 if (WARN(addr & ~IWL_TX_DMA_MASK, 517 "Unaligned address = %llx\n", (unsigned long long)addr)) 518 return -EINVAL; 519 520 iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); 521 522 return num_tbs; 523 } 524 525 static int iwl_pcie_txq_alloc(struct iwl_trans *trans, 526 struct iwl_txq *txq, int slots_num, 527 u32 txq_id) 528 { 529 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 530 size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; 531 size_t tb0_buf_sz; 532 int i; 533 534 if (WARN_ON(txq->entries || txq->tfds)) 535 return -EINVAL; 536 537 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 538 (unsigned long)txq); 539 txq->trans_pcie = trans_pcie; 540 541 txq->n_window = slots_num; 542 543 txq->entries = kcalloc(slots_num, 544 sizeof(struct iwl_pcie_txq_entry), 545 GFP_KERNEL); 546 547 if (!txq->entries) 548 goto error; 549 550 if (txq_id == trans_pcie->cmd_queue) 551 for (i = 0; i < slots_num; i++) { 552 txq->entries[i].cmd = 553 kmalloc(sizeof(struct iwl_device_cmd), 554 GFP_KERNEL); 555 if (!txq->entries[i].cmd) 556 goto error; 557 } 558 559 /* Circular buffer of transmit frame descriptors (TFDs), 560 * shared with device */ 561 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 562 &txq->dma_addr, GFP_KERNEL); 563 if (!txq->tfds) 564 goto error; 565 566 BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN 
!= sizeof(*txq->first_tb_bufs)); 567 568 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 569 570 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 571 &txq->first_tb_dma, 572 GFP_KERNEL); 573 if (!txq->first_tb_bufs) 574 goto err_free_tfds; 575 576 txq->id = txq_id; 577 578 return 0; 579 err_free_tfds: 580 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 581 error: 582 if (txq->entries && txq_id == trans_pcie->cmd_queue) 583 for (i = 0; i < slots_num; i++) 584 kfree(txq->entries[i].cmd); 585 kfree(txq->entries); 586 txq->entries = NULL; 587 588 return -ENOMEM; 589 590 } 591 592 static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 593 int slots_num, u32 txq_id) 594 { 595 int ret; 596 597 txq->need_update = false; 598 599 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 600 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 601 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 602 603 /* Initialize queue's high/low-water marks, and head/tail indexes */ 604 ret = iwl_queue_init(txq, slots_num, txq_id); 605 if (ret) 606 return ret; 607 608 spin_lock_init(&txq->lock); 609 __skb_queue_head_init(&txq->overflow_q); 610 611 /* 612 * Tell nic where to find circular buffer of Tx Frame Descriptors for 613 * given Tx queue, and enable the DMA channel used for that queue. 614 * Circular buffer (TFD queue in DRAM) physical base address */ 615 if (trans->cfg->use_tfh) 616 iwl_write_direct64(trans, 617 FH_MEM_CBBC_QUEUE(trans, txq_id), 618 txq->dma_addr); 619 else 620 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), 621 txq->dma_addr >> 8); 622 623 return 0; 624 } 625 626 static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, 627 struct sk_buff *skb) 628 { 629 struct page **page_ptr; 630 631 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); 632 633 if (*page_ptr) { 634 __free_page(*page_ptr); 635 *page_ptr = NULL; 636 } 637 } 638 639 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 640 { 641 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 642 643 lockdep_assert_held(&trans_pcie->reg_lock); 644 645 if (trans_pcie->ref_cmd_in_flight) { 646 trans_pcie->ref_cmd_in_flight = false; 647 IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); 648 iwl_trans_unref(trans); 649 } 650 651 if (!trans->cfg->base_params->apmg_wake_up_wa) 652 return; 653 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 654 return; 655 656 trans_pcie->cmd_hold_nic_awake = false; 657 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 658 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 659 } 660 661 /* 662 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 663 */ 664 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 665 { 666 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 667 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 668 669 spin_lock_bh(&txq->lock); 670 while (txq->write_ptr != txq->read_ptr) { 671 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 672 txq_id, txq->read_ptr); 673 674 if (txq_id != trans_pcie->cmd_queue) { 675 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 676 677 if (WARN_ON_ONCE(!skb)) 678 continue; 679 680 iwl_pcie_free_tso_page(trans_pcie, skb); 681 } 682 iwl_pcie_txq_free_tfd(trans, txq); 683 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); 684 685 if (txq->read_ptr == txq->write_ptr) { 686 unsigned long flags; 687 688 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 689 if (txq_id != trans_pcie->cmd_queue) 
{ 690 IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", 691 txq->id); 692 iwl_trans_unref(trans); 693 } else { 694 iwl_pcie_clear_cmd_in_flight(trans); 695 } 696 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 697 } 698 } 699 txq->active = false; 700 701 while (!skb_queue_empty(&txq->overflow_q)) { 702 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 703 704 iwl_op_mode_free_skb(trans->op_mode, skb); 705 } 706 707 spin_unlock_bh(&txq->lock); 708 709 /* just in case - this queue may have been stopped */ 710 iwl_wake_queue(trans, txq); 711 } 712 713 /* 714 * iwl_pcie_txq_free - Deallocate DMA queue. 715 * @txq: Transmit queue to deallocate. 716 * 717 * Empty queue by removing and destroying all BD's. 718 * Free all buffers. 719 * 0-fill, but do not free "txq" descriptor structure. 720 */ 721 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 722 { 723 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 724 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 725 struct device *dev = trans->dev; 726 int i; 727 728 if (WARN_ON(!txq)) 729 return; 730 731 iwl_pcie_txq_unmap(trans, txq_id); 732 733 /* De-alloc array of command/tx buffers */ 734 if (txq_id == trans_pcie->cmd_queue) 735 for (i = 0; i < txq->n_window; i++) { 736 kzfree(txq->entries[i].cmd); 737 kzfree(txq->entries[i].free_buf); 738 } 739 740 /* De-alloc circular buffer of TFDs */ 741 if (txq->tfds) { 742 dma_free_coherent(dev, 743 trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, 744 txq->tfds, txq->dma_addr); 745 txq->dma_addr = 0; 746 txq->tfds = NULL; 747 748 dma_free_coherent(dev, 749 sizeof(*txq->first_tb_bufs) * txq->n_window, 750 txq->first_tb_bufs, txq->first_tb_dma); 751 } 752 753 kfree(txq->entries); 754 txq->entries = NULL; 755 756 del_timer_sync(&txq->stuck_timer); 757 758 /* 0-fill queue descriptor structure */ 759 memset(txq, 0, sizeof(*txq)); 760 } 761 762 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 763 { 764 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 765 int nq = trans->cfg->base_params->num_of_queues; 766 int chan; 767 u32 reg_val; 768 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 769 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 770 771 /* make sure all queue are not stopped/used */ 772 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 773 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 774 775 if (trans->cfg->use_tfh) 776 return; 777 778 trans_pcie->scd_base_addr = 779 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 780 781 WARN_ON(scd_base_addr != 0 && 782 scd_base_addr != trans_pcie->scd_base_addr); 783 784 /* reset context data, TX status and translation data */ 785 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 786 SCD_CONTEXT_MEM_LOWER_BOUND, 787 NULL, clear_dwords); 788 789 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 790 trans_pcie->scd_bc_tbls.dma >> 10); 791 792 /* The chain extension of the SCD doesn't work well. This feature is 793 * enabled by default by the HW, so we need to disable it manually. 
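	 * (The workaround is gated on scd_chain_ext_wa in the config, as
	 * checked just below; disabling it amounts to writing 0 to the
	 * SCD_CHAINEXT_EN scheduler register.)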
794 */ 795 if (trans->cfg->base_params->scd_chain_ext_wa) 796 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 797 798 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 799 trans_pcie->cmd_fifo, 800 trans_pcie->cmd_q_wdg_timeout); 801 802 /* Activate all Tx DMA/FIFO channels */ 803 iwl_scd_activate_fifos(trans); 804 805 /* Enable DMA channel */ 806 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 807 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 808 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 809 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 810 811 /* Update FH chicken bits */ 812 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 813 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 814 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 815 816 /* Enable L1-Active */ 817 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) 818 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 819 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 820 } 821 822 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 823 { 824 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 825 int txq_id; 826 827 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 828 txq_id++) { 829 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 830 if (trans->cfg->use_tfh) 831 iwl_write_direct64(trans, 832 FH_MEM_CBBC_QUEUE(trans, txq_id), 833 txq->dma_addr); 834 else 835 iwl_write_direct32(trans, 836 FH_MEM_CBBC_QUEUE(trans, txq_id), 837 txq->dma_addr >> 8); 838 iwl_pcie_txq_unmap(trans, txq_id); 839 txq->read_ptr = 0; 840 txq->write_ptr = 0; 841 } 842 843 /* Tell NIC where to find the "keep warm" buffer */ 844 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 845 trans_pcie->kw.dma >> 4); 846 847 /* 848 * Send 0 as the scd_base_addr since the device may have be reset 849 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 850 * contain garbage. 851 */ 852 iwl_pcie_tx_start(trans, 0); 853 } 854 855 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 856 { 857 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 858 unsigned long flags; 859 int ch, ret; 860 u32 mask = 0; 861 862 spin_lock(&trans_pcie->irq_lock); 863 864 if (!iwl_trans_grab_nic_access(trans, &flags)) 865 goto out; 866 867 /* Stop each Tx DMA channel */ 868 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 869 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 870 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 871 } 872 873 /* Wait for DMA channels to be idle */ 874 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 875 if (ret < 0) 876 IWL_ERR(trans, 877 "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 878 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 879 880 iwl_trans_release_nic_access(trans, &flags); 881 882 out: 883 spin_unlock(&trans_pcie->irq_lock); 884 } 885 886 /* 887 * iwl_pcie_tx_stop - Stop all Tx DMA channels 888 */ 889 int iwl_pcie_tx_stop(struct iwl_trans *trans) 890 { 891 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 892 int txq_id; 893 894 /* Turn off all Tx DMA fifos */ 895 iwl_scd_deactivate_fifos(trans); 896 897 /* Turn off all Tx DMA channels */ 898 iwl_pcie_tx_stop_fh(trans); 899 900 /* 901 * This function can be called before the op_mode disabled the 902 * queues. This happens when we have an rfkill interrupt. 903 * Since we stop Tx altogether - mark the queues as stopped. 
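	 * (Clearing the queue_stopped/queue_used bitmaps below is enough
	 * here; the per-queue iwl_pcie_txq_unmap() loop further down frees
	 * whatever is still mapped, and is skipped if the txq array was
	 * never allocated.)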
904 */ 905 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 906 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 907 908 /* This can happen: start_hw, stop_device */ 909 if (!trans_pcie->txq) 910 return 0; 911 912 /* Unmap DMA from host system and free skb's */ 913 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 914 txq_id++) 915 iwl_pcie_txq_unmap(trans, txq_id); 916 917 return 0; 918 } 919 920 /* 921 * iwl_trans_tx_free - Free TXQ Context 922 * 923 * Destroy all TX DMA queues and structures 924 */ 925 void iwl_pcie_tx_free(struct iwl_trans *trans) 926 { 927 int txq_id; 928 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 929 930 /* Tx queues */ 931 if (trans_pcie->txq) { 932 for (txq_id = 0; 933 txq_id < trans->cfg->base_params->num_of_queues; txq_id++) 934 iwl_pcie_txq_free(trans, txq_id); 935 } 936 937 kfree(trans_pcie->txq); 938 trans_pcie->txq = NULL; 939 940 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 941 942 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); 943 } 944 945 /* 946 * iwl_pcie_tx_alloc - allocate TX context 947 * Allocate all Tx DMA structures and initialize them 948 */ 949 static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 950 { 951 int ret; 952 int txq_id, slots_num; 953 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 954 955 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * 956 sizeof(struct iwlagn_scd_bc_tbl); 957 958 /*It is not allowed to alloc twice, so warn when this happens. 959 * We cannot rely on the previous allocation, so free and fail */ 960 if (WARN_ON(trans_pcie->txq)) { 961 ret = -EINVAL; 962 goto error; 963 } 964 965 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, 966 scd_bc_tbls_size); 967 if (ret) { 968 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 969 goto error; 970 } 971 972 /* Alloc keep-warm buffer */ 973 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 974 if (ret) { 975 IWL_ERR(trans, "Keep Warm allocation failed\n"); 976 goto error; 977 } 978 979 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, 980 sizeof(struct iwl_txq), GFP_KERNEL); 981 if (!trans_pcie->txq) { 982 IWL_ERR(trans, "Not enough memory for txq\n"); 983 ret = -ENOMEM; 984 goto error; 985 } 986 987 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 988 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 989 txq_id++) { 990 slots_num = (txq_id == trans_pcie->cmd_queue) ? 
991 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 992 ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], 993 slots_num, txq_id); 994 if (ret) { 995 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 996 goto error; 997 } 998 } 999 1000 return 0; 1001 1002 error: 1003 iwl_pcie_tx_free(trans); 1004 1005 return ret; 1006 } 1007 int iwl_pcie_tx_init(struct iwl_trans *trans) 1008 { 1009 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1010 int ret; 1011 int txq_id, slots_num; 1012 bool alloc = false; 1013 1014 if (!trans_pcie->txq) { 1015 ret = iwl_pcie_tx_alloc(trans); 1016 if (ret) 1017 goto error; 1018 alloc = true; 1019 } 1020 1021 spin_lock(&trans_pcie->irq_lock); 1022 1023 /* Turn off all Tx DMA fifos */ 1024 iwl_scd_deactivate_fifos(trans); 1025 1026 /* Tell NIC where to find the "keep warm" buffer */ 1027 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 1028 trans_pcie->kw.dma >> 4); 1029 1030 spin_unlock(&trans_pcie->irq_lock); 1031 1032 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 1033 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 1034 txq_id++) { 1035 slots_num = (txq_id == trans_pcie->cmd_queue) ? 1036 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 1037 ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], 1038 slots_num, txq_id); 1039 if (ret) { 1040 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 1041 goto error; 1042 } 1043 } 1044 1045 if (trans->cfg->use_tfh) { 1046 iwl_write_direct32(trans, TFH_TRANSFER_MODE, 1047 TFH_TRANSFER_MAX_PENDING_REQ | 1048 TFH_CHUNK_SIZE_128 | 1049 TFH_CHUNK_SPLIT_MODE); 1050 return 0; 1051 } 1052 1053 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); 1054 if (trans->cfg->base_params->num_of_queues > 20) 1055 iwl_set_bits_prph(trans, SCD_GP_CTRL, 1056 SCD_GP_CTRL_ENABLE_31_QUEUES); 1057 1058 return 0; 1059 error: 1060 /*Upon error, free only if we allocated something */ 1061 if (alloc) 1062 iwl_pcie_tx_free(trans); 1063 return ret; 1064 } 1065 1066 static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 1067 { 1068 lockdep_assert_held(&txq->lock); 1069 1070 if (!txq->wd_timeout) 1071 return; 1072 1073 /* 1074 * station is asleep and we send data - that must 1075 * be uAPSD or PS-Poll. Don't rearm the timer. 1076 */ 1077 if (txq->frozen) 1078 return; 1079 1080 /* 1081 * if empty delete timer, otherwise move timer forward 1082 * since we're making progress on this queue 1083 */ 1084 if (txq->read_ptr == txq->write_ptr) 1085 del_timer(&txq->stuck_timer); 1086 else 1087 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1088 } 1089 1090 /* Frees buffers until index _not_ inclusive */ 1091 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 1092 struct sk_buff_head *skbs) 1093 { 1094 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1095 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1096 int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); 1097 int last_to_free; 1098 1099 /* This function is not meant to release cmd queue*/ 1100 if (WARN_ON(txq_id == trans_pcie->cmd_queue)) 1101 return; 1102 1103 spin_lock_bh(&txq->lock); 1104 1105 if (!txq->active) { 1106 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 1107 txq_id, ssn); 1108 goto out; 1109 } 1110 1111 if (txq->read_ptr == tfd_num) 1112 goto out; 1113 1114 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 1115 txq_id, txq->read_ptr, tfd_num, ssn); 1116 1117 /*Since we free until index _not_ inclusive, the one before index is 1118 * the last we will free. 
This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     txq->read_ptr != tfd_num;
	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[txq->read_ptr].skb = NULL;

		if (!trans->cfg->use_tfh)
			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(txq) > txq->low_mark &&
	    test_bit(txq_id, trans_pcie->queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * This is tricky: we are in the reclaim path, which is not
		 * re-entrant, so no one else will try to access the txq data
		 * from that path. The queue is stopped, so no new tx can race
		 * with us here either. Bottom line: we can safely unlock and
		 * re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans_pcie->dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_queue_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
		}
		spin_lock_bh(&txq->lock);

		if (iwl_queue_space(txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);
	}

	if (txq->read_ptr == txq->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
		iwl_trans_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
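	 *
	 * The sequence below sets MAC_ACCESS_REQ in CSR_GP_CNTRL and then
	 * polls for the MAC clock to become ready (clearing the bit again
	 * and failing if the poll times out) before marking
	 * cmd_hold_nic_awake.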
1219 */ 1220 if (trans->cfg->base_params->apmg_wake_up_wa && 1221 !trans_pcie->cmd_hold_nic_awake) { 1222 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1223 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1224 1225 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1226 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1227 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1228 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 1229 15000); 1230 if (ret < 0) { 1231 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1232 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1233 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1234 return -EIO; 1235 } 1236 trans_pcie->cmd_hold_nic_awake = true; 1237 } 1238 1239 return 0; 1240 } 1241 1242 /* 1243 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1244 * 1245 * When FW advances 'R' index, all entries between old and new 'R' index 1246 * need to be reclaimed. As result, some free space forms. If there is 1247 * enough free space (> low mark), wake the stack that feeds us. 1248 */ 1249 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1250 { 1251 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1252 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1253 unsigned long flags; 1254 int nfreed = 0; 1255 1256 lockdep_assert_held(&txq->lock); 1257 1258 if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { 1259 IWL_ERR(trans, 1260 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1261 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 1262 txq->write_ptr, txq->read_ptr); 1263 return; 1264 } 1265 1266 for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx; 1267 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1268 1269 if (nfreed++ > 0) { 1270 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1271 idx, txq->write_ptr, txq->read_ptr); 1272 iwl_force_nmi(trans); 1273 } 1274 } 1275 1276 if (txq->read_ptr == txq->write_ptr) { 1277 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1278 iwl_pcie_clear_cmd_in_flight(trans); 1279 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1280 } 1281 1282 iwl_pcie_txq_progress(txq); 1283 } 1284 1285 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1286 u16 txq_id) 1287 { 1288 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1289 u32 tbl_dw_addr; 1290 u32 tbl_dw; 1291 u16 scd_q2ratid; 1292 1293 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1294 1295 tbl_dw_addr = trans_pcie->scd_base_addr + 1296 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1297 1298 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1299 1300 if (txq_id & 0x1) 1301 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1302 else 1303 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1304 1305 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1306 1307 return 0; 1308 } 1309 1310 /* Receiver address (actually, Rx station's index into station table), 1311 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1312 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1313 1314 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1315 const struct iwl_trans_txq_scd_cfg *cfg, 1316 unsigned int wdg_timeout) 1317 { 1318 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1319 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1320 int fifo = -1; 1321 1322 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 1323 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1324 1325 if (cfg && 
trans->cfg->use_tfh) 1326 WARN_ONCE(1, "Expected no calls to SCD configuration"); 1327 1328 txq->wd_timeout = msecs_to_jiffies(wdg_timeout); 1329 1330 if (cfg) { 1331 fifo = cfg->fifo; 1332 1333 /* Disable the scheduler prior configuring the cmd queue */ 1334 if (txq_id == trans_pcie->cmd_queue && 1335 trans_pcie->scd_set_active) 1336 iwl_scd_enable_set_active(trans, 0); 1337 1338 /* Stop this Tx queue before configuring it */ 1339 iwl_scd_txq_set_inactive(trans, txq_id); 1340 1341 /* Set this queue as a chain-building queue unless it is CMD */ 1342 if (txq_id != trans_pcie->cmd_queue) 1343 iwl_scd_txq_set_chain(trans, txq_id); 1344 1345 if (cfg->aggregate) { 1346 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); 1347 1348 /* Map receiver-address / traffic-ID to this queue */ 1349 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); 1350 1351 /* enable aggregations for the queue */ 1352 iwl_scd_txq_enable_agg(trans, txq_id); 1353 txq->ampdu = true; 1354 } else { 1355 /* 1356 * disable aggregations for the queue, this will also 1357 * make the ra_tid mapping configuration irrelevant 1358 * since it is now a non-AGG queue. 1359 */ 1360 iwl_scd_txq_disable_agg(trans, txq_id); 1361 1362 ssn = txq->read_ptr; 1363 } 1364 } 1365 1366 /* Place first TFD at index corresponding to start sequence number. 1367 * Assumes that ssn_idx is valid (!= 0xFFF) */ 1368 txq->read_ptr = (ssn & 0xff); 1369 txq->write_ptr = (ssn & 0xff); 1370 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1371 (ssn & 0xff) | (txq_id << 8)); 1372 1373 if (cfg) { 1374 u8 frame_limit = cfg->frame_limit; 1375 1376 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1377 1378 /* Set up Tx window size and frame limit for this queue */ 1379 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1380 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1381 iwl_trans_write_mem32(trans, 1382 trans_pcie->scd_base_addr + 1383 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1384 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 1385 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 1386 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 1387 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 1388 1389 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1390 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1391 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1392 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1393 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1394 SCD_QUEUE_STTS_REG_MSK); 1395 1396 /* enable the scheduler for this queue (only) */ 1397 if (txq_id == trans_pcie->cmd_queue && 1398 trans_pcie->scd_set_active) 1399 iwl_scd_enable_set_active(trans, BIT(txq_id)); 1400 1401 IWL_DEBUG_TX_QUEUES(trans, 1402 "Activate queue %d on FIFO %d WrPtr: %d\n", 1403 txq_id, fifo, ssn & 0xff); 1404 } else { 1405 IWL_DEBUG_TX_QUEUES(trans, 1406 "Activate queue %d WrPtr: %d\n", 1407 txq_id, ssn & 0xff); 1408 } 1409 1410 txq->active = true; 1411 } 1412 1413 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, 1414 bool shared_mode) 1415 { 1416 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1417 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1418 1419 txq->ampdu = !shared_mode; 1420 } 1421 1422 dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq) 1423 { 1424 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1425 1426 return trans_pcie->scd_bc_tbls.dma + 1427 txq * sizeof(struct iwlagn_scd_bc_tbl); 1428 } 1429 1430 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1431 bool configure_scd) 
1432 { 1433 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1434 u32 stts_addr = trans_pcie->scd_base_addr + 1435 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1436 static const u32 zero_val[4] = {}; 1437 1438 trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; 1439 trans_pcie->txq[txq_id].frozen = false; 1440 1441 /* 1442 * Upon HW Rfkill - we stop the device, and then stop the queues 1443 * in the op_mode. Just for the sake of the simplicity of the op_mode, 1444 * allow the op_mode to call txq_disable after it already called 1445 * stop_device. 1446 */ 1447 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 1448 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 1449 "queue %d not used", txq_id); 1450 return; 1451 } 1452 1453 if (configure_scd && trans->cfg->use_tfh) 1454 WARN_ONCE(1, "Expected no calls to SCD configuration"); 1455 1456 if (configure_scd) { 1457 iwl_scd_txq_set_inactive(trans, txq_id); 1458 1459 iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, 1460 ARRAY_SIZE(zero_val)); 1461 } 1462 1463 iwl_pcie_txq_unmap(trans, txq_id); 1464 trans_pcie->txq[txq_id].ampdu = false; 1465 1466 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1467 } 1468 1469 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1470 1471 /* 1472 * iwl_pcie_enqueue_hcmd - enqueue a uCode command 1473 * @priv: device private data point 1474 * @cmd: a pointer to the ucode command structure 1475 * 1476 * The function returns < 0 values to indicate the operation 1477 * failed. On success, it returns the index (>= 0) of command in the 1478 * command queue. 1479 */ 1480 static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1481 struct iwl_host_cmd *cmd) 1482 { 1483 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1484 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1485 struct iwl_device_cmd *out_cmd; 1486 struct iwl_cmd_meta *out_meta; 1487 unsigned long flags; 1488 void *dup_buf = NULL; 1489 dma_addr_t phys_addr; 1490 int idx; 1491 u16 copy_size, cmd_size, tb0_size; 1492 bool had_nocopy = false; 1493 u8 group_id = iwl_cmd_groupid(cmd->id); 1494 int i, ret; 1495 u32 cmd_pos; 1496 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1497 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1498 1499 if (WARN(!trans->wide_cmd_header && 1500 group_id > IWL_ALWAYS_LONG_GROUP, 1501 "unsupported wide command %#x\n", cmd->id)) 1502 return -EINVAL; 1503 1504 if (group_id != 0) { 1505 copy_size = sizeof(struct iwl_cmd_header_wide); 1506 cmd_size = sizeof(struct iwl_cmd_header_wide); 1507 } else { 1508 copy_size = sizeof(struct iwl_cmd_header); 1509 cmd_size = sizeof(struct iwl_cmd_header); 1510 } 1511 1512 /* need one for the header if the first is NOCOPY */ 1513 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1514 1515 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1516 cmddata[i] = cmd->data[i]; 1517 cmdlen[i] = cmd->len[i]; 1518 1519 if (!cmd->len[i]) 1520 continue; 1521 1522 /* need at least IWL_FIRST_TB_SIZE copied */ 1523 if (copy_size < IWL_FIRST_TB_SIZE) { 1524 int copy = IWL_FIRST_TB_SIZE - copy_size; 1525 1526 if (copy > cmdlen[i]) 1527 copy = cmdlen[i]; 1528 cmdlen[i] -= copy; 1529 cmddata[i] += copy; 1530 copy_size += copy; 1531 } 1532 1533 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1534 had_nocopy = true; 1535 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1536 idx = -EINVAL; 1537 goto free_dup_buf; 1538 } 1539 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1540 /* 1541 * This is also a chunk that isn't copied 1542 * to the 
static buffer so set had_nocopy. 1543 */ 1544 had_nocopy = true; 1545 1546 /* only allowed once */ 1547 if (WARN_ON(dup_buf)) { 1548 idx = -EINVAL; 1549 goto free_dup_buf; 1550 } 1551 1552 dup_buf = kmemdup(cmddata[i], cmdlen[i], 1553 GFP_ATOMIC); 1554 if (!dup_buf) 1555 return -ENOMEM; 1556 } else { 1557 /* NOCOPY must not be followed by normal! */ 1558 if (WARN_ON(had_nocopy)) { 1559 idx = -EINVAL; 1560 goto free_dup_buf; 1561 } 1562 copy_size += cmdlen[i]; 1563 } 1564 cmd_size += cmd->len[i]; 1565 } 1566 1567 /* 1568 * If any of the command structures end up being larger than 1569 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1570 * allocated into separate TFDs, then we will need to 1571 * increase the size of the buffers. 1572 */ 1573 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1574 "Command %s (%#x) is too large (%d bytes)\n", 1575 iwl_get_cmd_string(trans, cmd->id), 1576 cmd->id, copy_size)) { 1577 idx = -EINVAL; 1578 goto free_dup_buf; 1579 } 1580 1581 spin_lock_bh(&txq->lock); 1582 1583 if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1584 spin_unlock_bh(&txq->lock); 1585 1586 IWL_ERR(trans, "No space in command queue\n"); 1587 iwl_op_mode_cmd_queue_full(trans->op_mode); 1588 idx = -ENOSPC; 1589 goto free_dup_buf; 1590 } 1591 1592 idx = get_cmd_index(txq, txq->write_ptr); 1593 out_cmd = txq->entries[idx].cmd; 1594 out_meta = &txq->entries[idx].meta; 1595 1596 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1597 if (cmd->flags & CMD_WANT_SKB) 1598 out_meta->source = cmd; 1599 1600 /* set up the header */ 1601 if (group_id != 0) { 1602 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1603 out_cmd->hdr_wide.group_id = group_id; 1604 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1605 out_cmd->hdr_wide.length = 1606 cpu_to_le16(cmd_size - 1607 sizeof(struct iwl_cmd_header_wide)); 1608 out_cmd->hdr_wide.reserved = 0; 1609 out_cmd->hdr_wide.sequence = 1610 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1611 INDEX_TO_SEQ(txq->write_ptr)); 1612 1613 cmd_pos = sizeof(struct iwl_cmd_header_wide); 1614 copy_size = sizeof(struct iwl_cmd_header_wide); 1615 } else { 1616 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1617 out_cmd->hdr.sequence = 1618 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1619 INDEX_TO_SEQ(txq->write_ptr)); 1620 out_cmd->hdr.group_id = 0; 1621 1622 cmd_pos = sizeof(struct iwl_cmd_header); 1623 copy_size = sizeof(struct iwl_cmd_header); 1624 } 1625 1626 /* and copy the data that needs to be copied */ 1627 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1628 int copy; 1629 1630 if (!cmd->len[i]) 1631 continue; 1632 1633 /* copy everything if not nocopy/dup */ 1634 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1635 IWL_HCMD_DFL_DUP))) { 1636 copy = cmd->len[i]; 1637 1638 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1639 cmd_pos += copy; 1640 copy_size += copy; 1641 continue; 1642 } 1643 1644 /* 1645 * Otherwise we need at least IWL_FIRST_TB_SIZE copied 1646 * in total (for bi-directional DMA), but copy up to what 1647 * we can fit into the payload for debug dump purposes. 
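		 *
		 * Illustrative example (sizes are hypothetical): if
		 * IWL_FIRST_TB_SIZE were 20 and only 16 bytes had been
		 * accounted for so far, 4 bytes of this chunk would still be
		 * counted toward copy_size so that the first TB ends up
		 * fully backed by copied data.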
1648 */ 1649 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1650 1651 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1652 cmd_pos += copy; 1653 1654 /* However, treat copy_size the proper way, we need it below */ 1655 if (copy_size < IWL_FIRST_TB_SIZE) { 1656 copy = IWL_FIRST_TB_SIZE - copy_size; 1657 1658 if (copy > cmd->len[i]) 1659 copy = cmd->len[i]; 1660 copy_size += copy; 1661 } 1662 } 1663 1664 IWL_DEBUG_HC(trans, 1665 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1666 iwl_get_cmd_string(trans, cmd->id), 1667 group_id, out_cmd->hdr.cmd, 1668 le16_to_cpu(out_cmd->hdr.sequence), 1669 cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); 1670 1671 /* start the TFD with the minimum copy bytes */ 1672 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 1673 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1674 iwl_pcie_txq_build_tfd(trans, txq, 1675 iwl_pcie_get_first_tb_dma(txq, idx), 1676 tb0_size, true); 1677 1678 /* map first command fragment, if any remains */ 1679 if (copy_size > tb0_size) { 1680 phys_addr = dma_map_single(trans->dev, 1681 ((u8 *)&out_cmd->hdr) + tb0_size, 1682 copy_size - tb0_size, 1683 DMA_TO_DEVICE); 1684 if (dma_mapping_error(trans->dev, phys_addr)) { 1685 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1686 txq->write_ptr); 1687 idx = -ENOMEM; 1688 goto out; 1689 } 1690 1691 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1692 copy_size - tb0_size, false); 1693 } 1694 1695 /* map the remaining (adjusted) nocopy/dup fragments */ 1696 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1697 const void *data = cmddata[i]; 1698 1699 if (!cmdlen[i]) 1700 continue; 1701 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1702 IWL_HCMD_DFL_DUP))) 1703 continue; 1704 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1705 data = dup_buf; 1706 phys_addr = dma_map_single(trans->dev, (void *)data, 1707 cmdlen[i], DMA_TO_DEVICE); 1708 if (dma_mapping_error(trans->dev, phys_addr)) { 1709 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1710 txq->write_ptr); 1711 idx = -ENOMEM; 1712 goto out; 1713 } 1714 1715 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1716 } 1717 1718 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); 1719 out_meta->flags = cmd->flags; 1720 if (WARN_ON_ONCE(txq->entries[idx].free_buf)) 1721 kzfree(txq->entries[idx].free_buf); 1722 txq->entries[idx].free_buf = dup_buf; 1723 1724 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); 1725 1726 /* start timer if queue currently empty */ 1727 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 1728 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1729 1730 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1731 ret = iwl_pcie_set_cmd_in_flight(trans, cmd); 1732 if (ret < 0) { 1733 idx = ret; 1734 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1735 goto out; 1736 } 1737 1738 /* Increment and update queue's write index */ 1739 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 1740 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1741 1742 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1743 1744 out: 1745 spin_unlock_bh(&txq->lock); 1746 free_dup_buf: 1747 if (idx < 0) 1748 kfree(dup_buf); 1749 return idx; 1750 } 1751 1752 /* 1753 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1754 * @rxb: Rx buffer to reclaim 1755 */ 1756 void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 1757 struct iwl_rx_cmd_buffer *rxb) 1758 { 1759 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1760 u16 sequence = 
le16_to_cpu(pkt->hdr.sequence);
	u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_pcie_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans_pcie->wait_command_queue);
	}

	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		set_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

	if (meta->flags & CMD_WAKE_UP_TRANS) {
		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		clear_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set.
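	 * (CMD_WANT_SKB implies the caller wants to inspect the response
	 * packet, which only the synchronous path can hand back, hence the
	 * WARN_ON below.)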
*/ 1845 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1846 return -EINVAL; 1847 1848 ret = iwl_pcie_enqueue_hcmd(trans, cmd); 1849 if (ret < 0) { 1850 IWL_ERR(trans, 1851 "Error sending %s: enqueue_hcmd failed: %d\n", 1852 iwl_get_cmd_string(trans, cmd->id), ret); 1853 return ret; 1854 } 1855 return 0; 1856 } 1857 1858 static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, 1859 struct iwl_host_cmd *cmd) 1860 { 1861 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1862 int cmd_idx; 1863 int ret; 1864 1865 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 1866 iwl_get_cmd_string(trans, cmd->id)); 1867 1868 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 1869 &trans->status), 1870 "Command %s: a command is already active!\n", 1871 iwl_get_cmd_string(trans, cmd->id))) 1872 return -EIO; 1873 1874 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 1875 iwl_get_cmd_string(trans, cmd->id)); 1876 1877 if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { 1878 ret = wait_event_timeout(trans_pcie->d0i3_waitq, 1879 pm_runtime_active(&trans_pcie->pci_dev->dev), 1880 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); 1881 if (!ret) { 1882 IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); 1883 return -ETIMEDOUT; 1884 } 1885 } 1886 1887 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 1888 if (cmd_idx < 0) { 1889 ret = cmd_idx; 1890 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1891 IWL_ERR(trans, 1892 "Error sending %s: enqueue_hcmd failed: %d\n", 1893 iwl_get_cmd_string(trans, cmd->id), ret); 1894 return ret; 1895 } 1896 1897 ret = wait_event_timeout(trans_pcie->wait_command_queue, 1898 !test_bit(STATUS_SYNC_HCMD_ACTIVE, 1899 &trans->status), 1900 HOST_COMPLETE_TIMEOUT); 1901 if (!ret) { 1902 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1903 1904 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 1905 iwl_get_cmd_string(trans, cmd->id), 1906 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1907 1908 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 1909 txq->read_ptr, txq->write_ptr); 1910 1911 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1912 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1913 iwl_get_cmd_string(trans, cmd->id)); 1914 ret = -ETIMEDOUT; 1915 1916 iwl_force_nmi(trans); 1917 iwl_trans_fw_error(trans); 1918 1919 goto cancel; 1920 } 1921 1922 if (test_bit(STATUS_FW_ERROR, &trans->status)) { 1923 IWL_ERR(trans, "FW error in SYNC CMD %s\n", 1924 iwl_get_cmd_string(trans, cmd->id)); 1925 dump_stack(); 1926 ret = -EIO; 1927 goto cancel; 1928 } 1929 1930 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 1931 test_bit(STATUS_RFKILL, &trans->status)) { 1932 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 1933 ret = -ERFKILL; 1934 goto cancel; 1935 } 1936 1937 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1938 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 1939 iwl_get_cmd_string(trans, cmd->id)); 1940 ret = -EIO; 1941 goto cancel; 1942 } 1943 1944 return 0; 1945 1946 cancel: 1947 if (cmd->flags & CMD_WANT_SKB) { 1948 /* 1949 * Cancel the CMD_WANT_SKB flag for the cmd in the 1950 * TX cmd queue. Otherwise in case the cmd comes 1951 * in later, it will possibly set an invalid 1952 * address (cmd->meta.source). 1953 */ 1954 trans_pcie->txq[trans_pcie->cmd_queue]. 
	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta,
			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 tb2_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			return -EINVAL;
		}
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);

		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  hdr_len, skb->len - hdr_len);
	return 0;
}

#ifdef CONFIG_INET
static struct iwl_tso_hdr_page *
get_page_hdr(struct iwl_trans *trans, size_t len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);

	if (!p->page)
		goto alloc;

	/* enough room on this page */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
		return p;

	/* We don't have enough room on this page, get a new one. */
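	/*
	 * Freeing here only drops this CPU's cached reference; frames that
	 * already borrowed headers from this page hold their own reference,
	 * taken with get_page() in iwl_fill_data_tbs_amsdu().
	 */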
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	return p;
}

static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}

static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	int ret;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     NULL, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

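	/*
	 * Each A-MSDU subframe built in the loop below is laid out in the
	 * header page as DA(6) | SA(6) | length(2) | SNAP+IP+TCP headers
	 * (written by tso_build_hdr()), followed by payload chunks mapped
	 * straight from the skb, with zero padding (amsdu_pad) so that the
	 * next subframe starts on a 4-byte boundary.
	 */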
	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* For testing on current hardware only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
							htons(ETH_P_IPV6),
						    data_left);

			memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
			       tcph, tcp_hdrlen(skb));
			skb_set_transport_header(csum_skb, 0);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
				 csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			ret = -EINVAL;
			goto out_unmap;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
					       hdr_tb_len);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			if (trans_pcie->sw_csum_tx)
				memcpy(skb_put(csum_skb, size), tso.data, size);

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				ret = -EINVAL;
				goto out_unmap;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		/* For testing on early hardware only */
		if (trans_pcie->sw_csum_tx) {
			__wsum csum;

			csum = skb_checksum(csum_skb,
					    skb_checksum_start_offset(csum_skb),
					    csum_skb->len -
					    skb_checksum_start_offset(csum_skb),
					    0);
			dev_kfree_skb(csum_skb);
			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
						hdr_tb_len, DMA_TO_DEVICE);
			tcph->check = csum_fold(csum);
			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
						   hdr_tb_len, DMA_TO_DEVICE);
		}
	}

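	/*
	 * All subframe headers and payload chunks are now attached to the
	 * TFD; on error, the out_unmap label below tears the mappings down.
	 */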
	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_unmap:
	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	return ret;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = &trans_pcie->txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_queue_space(txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_queue_space(txq) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

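	/*
	 * Note: a frame parked on overflow_q above still counts as accepted
	 * (we returned 0); it is transmitted later, once the queue has
	 * drained enough to make room.
	 */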
	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
	} else {
		tb1_len = len;
	}

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
	       IWL_FIRST_TB_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	if (amsdu) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					      out_meta, dev_cmd, tb1_len))) {
		goto out_err;
	}

	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_pcie_tfd_get_num_tbs(trans, tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

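	/*
	 * For fragmented frames, don't ring the doorbell per fragment; see
	 * the !wait_write_ptr check before returning below.
	 */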
	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout) {
			/*
			 * If the TXQ is active, arm the timer now; if not,
			 * save the timeout in frozen_expiry_remainder so the
			 * timer is armed with the right value when the
			 * station wakes up.
			 */
			if (!txq->frozen)
				mod_timer(&txq->stuck_timer,
					  jiffies + txq->wd_timeout);
			else
				txq->frozen_expiry_remainder = txq->wd_timeout;
		}
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}