/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits.  If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

static int iwl_queue_space(const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
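/*
 * Worked example of the wrap-around arithmetic above (illustrative only,
 * assuming TFD_QUEUE_SIZE_MAX == 256 and a data queue with n_window == 256):
 *
 *	write_ptr = 5, read_ptr = 250
 *	used  = (5 - 250) & 255 = 11	(the subtraction wraps modulo 256)
 *	space = (256 - 1) - 11 = 244
 *
 * The "- 1" keeps one entry permanently unused, so read_ptr == write_ptr
 * always means "empty" and can never mean "full".
 */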
/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
		jiffies_to_msecs(txq->wd_timeout));

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt,
					     int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	if (trans->cfg->use_tfh) {
		u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
				     num_tbs * sizeof(struct iwl_tfh_tb);
		/*
		 * filled_tfd_size contains the number of filled bytes in the
		 * TFD.
		 * Dividing it by 64 will give the number of chunks to fetch
		 * to SRAM - 0 for one chunk, 1 for 2 and so on.
		 * If, for example, the TFD contains only 3 TBs then 32 bytes
		 * of the TFD are used, and only one chunk of 64 bytes should
		 * be fetched.
		 */
		u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	} else {
		u8 sta_id = tx_cmd->sta_id;

		bc_ent = cpu_to_le16(len | (sta_id << 12));
	}

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
				     struct iwl_txq *txq, int idx)
{
	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
						  void *tfd, u8 idx)
{
	struct iwl_tfd *tfd_fh;
	struct iwl_tfd_tb *tb;
	dma_addr_t addr;

	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	}

	tfd_fh = (void *)tfd;
	tb = &tfd_fh->tbs[idx];
	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];

		put_unaligned_le64(addr, &tb->addr);
		tb->tb_len = cpu_to_le16(len);

		tfd_fh->num_tbs = cpu_to_le16(idx + 1);
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;
		struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

		u16 hi_n_len = len << 4;

		put_unaligned_le32(addr, &tb->lo);
		if (sizeof(dma_addr_t) > sizeof(u32))
			hi_n_len |= ((addr >> 16) >> 16) & 0xF;

		tb->hi_n_len = cpu_to_le16(hi_n_len);

		tfd_fh->num_tbs = idx + 1;
	}
}
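/*
 * Layout note (illustrative only, not used by the code): in the legacy TFD
 * format each TB stores a 36-bit DMA address split across two fields.
 * For example, with addr == 0x8_1234_5678 and len == 200:
 *
 *	tb->lo       = 0x12345678		(low 32 address bits)
 *	tb->hi_n_len = (200 << 4) | 0x8		(length in bits 4..15,
 *						 address bits 32..35 in 0..3)
 *
 * The TFH format used by newer devices simply stores a 64-bit address and a
 * separate 16-bit length.
 */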
static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *tfd)
{
	struct iwl_tfd *tfd_fh;

	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		return le16_to_cpu(tfd_fh->num_tbs) & 0x1f;
	}

	tfd_fh = (void *)tfd;
	return tfd_fh->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_txq *txq, int index)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;
	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
								  i),
					 iwl_pcie_tfd_tb_get_len(trans, tfd,
								 i),
					 DMA_TO_DEVICE);
	}

	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context.
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb.
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans_pcie->tfd_size);

	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}
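/*
 * Illustrative sketch of how the Tx path chains the helper above to assemble
 * one TFD (simplified; the real callers are iwl_trans_pcie_tx() and
 * iwl_pcie_enqueue_hcmd()):
 *
 *	// TB0: small bi-directional scratch buffer, resets the TFD
 *	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, IWL_FIRST_TB_SIZE, true);
 *	// TB1: device command + 802.11 header, appended to the same TFD
 *	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 *	// TB2..N: payload fragments; for page fragments the returned index
 *	// is recorded as a bit in out_meta->tbs so that iwl_pcie_tfd_unmap()
 *	// unmaps them with dma_unmap_page() rather than dma_unmap_single()
 *	tb_idx = iwl_pcie_txq_build_tfd(trans, txq, frag_phys, frag_len, false);
 */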
static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	txq->id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);
	__skb_queue_head_init(&txq->overflow_q);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	if (trans->cfg->use_tfh)
		iwl_write_direct64(trans,
				   FH_MEM_CBBC_QUEUE(trans, txq_id),
				   txq->dma_addr);
	else
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   txq->dma_addr >> 8);

	return 0;
}

static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
				   struct sk_buff *skb)
{
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);

	if (*page_ptr) {
		__free_page(*page_ptr);
		*page_ptr = NULL;
	}
}
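/*
 * A-MSDU Tx stashes the pointer to the TSO header page inside skb->cb at
 * page_offs so it can be released above once the frame is reclaimed.
 * Conceptually (illustrative only, this is what iwl_fill_data_tbs_amsdu()
 * below does when it grabs a header page):
 *
 *	struct page **page_ptr =
 *		(void *)((u8 *)skb->cb + trans_pcie->page_offs);
 *	*page_ptr = hdr_page->page;	// paired with __free_page() above
 */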
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_unref(trans);
	}

	if (!trans->cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else {
				iwl_pcie_clear_cmd_in_flight(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	txq->active = false;

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	if (trans->cfg->use_tfh)
		return;

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
			    SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo,
				trans_pcie->cmd_q_wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];
		if (trans->cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			       sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	if (trans->cfg->use_tfh) {
		iwl_write_direct32(trans, TFH_TRANSFER_MODE,
				   TFH_TRANSFER_MAX_PENDING_REQ |
				   TFH_CHUNK_SIZE_128 |
				   TFH_CHUNK_SPLIT_MODE);
		return 0;
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}
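/*
 * Reclaim example (illustrative, assuming TFD_QUEUE_SIZE_MAX == 256): the
 * op_mode passes the ssn it got from a Tx response or BA notification.
 * With read_ptr == 10 and ssn == 13, tfd_num = 13 & 255 = 13, so entries
 * 10, 11 and 12 are freed and handed back in @skbs, and read_ptr ends up
 * at 13 - "frees until index, not inclusive".
 */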
/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. That entry must currently be in use. */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     txq->read_ptr != tfd_num;
	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[txq->read_ptr].skb = NULL;

		if (!trans->cfg->use_tfh)
			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(txq) > txq->low_mark &&
	    test_bit(txq_id, trans_pcie->queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * This is tricky: we are in the reclaim path, which is not
		 * re-entrant, so no one else will try to access the txq data
		 * from that path. Tx on this queue is stopped, so no new
		 * frames can arrive either. Bottom line: we can unlock and
		 * re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans_pcie->dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_queue_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
		}
		spin_lock_bh(&txq->lock);

		if (iwl_queue_space(txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);
	}

	if (txq->read_ptr == txq->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
		iwl_trans_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, txq->read_ptr);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
		      SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
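/*
 * Illustrative example of the scheduler translation-table entry built above
 * (not executed anywhere): for sta_id 3, tid 5 and txq_id 13,
 *
 *	ra_tid      = BUILD_RAxTID(3, 5) = 0x35
 *	scd_q2ratid = 0x35 & SCD_QUEUE_RA_TID_MAP_RATID_MSK
 *
 * and since txq_id 13 is odd, the value lands in the upper 16 bits of the
 * dword at SCD_TRANS_TBL_OFFSET_QUEUE(13), leaving the even queue that
 * shares the same dword untouched.
 */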
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int fifo = -1;

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	if (cfg && trans->cfg->use_tfh)
		WARN_ONCE(1, "Expected no calls to SCD configuration");

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->cmd_queue)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	txq->active = true;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];

	txq->ampdu = !shared_mode;
}

dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	return trans_pcie->scd_bc_tbls.dma +
	       txq * sizeof(struct iwlagn_scd_bc_tbl);
}
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
	trans_pcie->txq[txq_id].frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd && trans->cfg->use_tfh)
		WARN_ONCE(1, "Expected no calls to SCD configuration");

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans_pcie->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}
		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, keep copy_size accurate - we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
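/*
 * The 16-bit sequence field written into the command header above encodes
 * both the queue and the slot, so the completion path can find the entry
 * again.  Roughly (illustrative, see the SEQ_* macros in the shared
 * headers):
 *
 *	sequence = QUEUE_TO_SEQ(cmd_queue) | INDEX_TO_SEQ(write_ptr);
 *	...
 *	txq_id = SEQ_TO_QUEUE(sequence);	// in iwl_pcie_hcmd_complete()
 *	index  = SEQ_TO_INDEX(sequence);
 */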
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_pcie_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans_pcie->wait_command_queue);
	}

	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		set_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

	if (meta->flags & CMD_WAKE_UP_TRANS) {
		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		clear_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}
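/*
 * Usage sketch (illustrative, not part of this file): an op_mode sends host
 * commands through the transport API, which lands in
 * iwl_trans_pcie_send_hcmd() below.  A synchronous command blocks until
 * iwl_pcie_hcmd_complete() wakes wait_command_queue:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = cmd_id,			// hypothetical command id
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *		.flags = CMD_WANT_SKB,		// caller wants the response
 *	};
 *
 *	ret = iwl_trans_pcie_send_hcmd(trans, &hcmd);
 *	if (!ret)
 *		iwl_free_resp(&hcmd);		// release the stolen rxb page
 */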
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				pm_runtime_active(&trans_pcie->pci_dev->dev),
				msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
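/*
 * iwl_trans_pcie_send_hcmd - transport entry point for host commands
 *
 * Drops the command if RF-kill is asserted (unless CMD_SEND_IN_RFKILL is set)
 * and dispatches to the async or sync path depending on CMD_ASYNC.
 */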
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}
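/*
 * iwl_fill_data_tbs - map the SKB payload into TBs of the current TFD
 *
 * Maps the remainder of the SKB head after the 802.11 header (if any) and
 * every paged fragment, adding each mapping as a TB of the TFD at write_ptr.
 */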
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta,
			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 tb2_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			return -EINVAL;
		}
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);

		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  hdr_len, skb->len - hdr_len);
	return 0;
}

#ifdef CONFIG_INET
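/*
 * get_page_hdr - get a per-CPU page used to build A-MSDU subframe headers
 *
 * Reuses the current page if it still has @len bytes of room; otherwise the
 * local reference is dropped and a fresh page is allocated.
 */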
static struct iwl_tso_hdr_page *
get_page_hdr(struct iwl_trans *trans, size_t len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);

	if (!p->page)
		goto alloc;

	/* enough room on this page */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
		return p;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	return p;
}

/*
 * For the SW checksum (sw_csum_tx) path: seed tcph->check with the
 * complemented TCP pseudo-header checksum (and, for IPv4, recompute the
 * IP header checksum).
 */
static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}
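/*
 * iwl_fill_data_tbs_amsdu - build an A-MSDU from a TSO SKB
 *
 * Splits the payload into MSS-sized subframes using the TSO core. For each
 * subframe a header (DA/SA, length, SNAP/IP/TCP) is built in a per-CPU page,
 * and both the header and the payload chunks are mapped as TBs of the TFD.
 */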
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	int ret;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     NULL, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most; they will fit in one page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV so we can use the TSO core;
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well, which will be treated
		 * as part of the MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* For testing on current hardware only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
							htons(ETH_P_IPV6),
						    data_left);

			memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
			       tcph, tcp_hdrlen(skb));
			skb_set_transport_header(csum_skb, 0);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
				 csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			ret = -EINVAL;
			goto out_unmap;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
					       hdr_tb_len);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			if (trans_pcie->sw_csum_tx)
				memcpy(skb_put(csum_skb, size), tso.data, size);

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				ret = -EINVAL;
				goto out_unmap;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		/* For testing on early hardware only */
		if (trans_pcie->sw_csum_tx) {
			__wsum csum;

			csum = skb_checksum(csum_skb,
					    skb_checksum_start_offset(csum_skb),
					    csum_skb->len -
					    skb_checksum_start_offset(csum_skb),
					    0);
			dev_kfree_skb(csum_skb);
			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
						hdr_tb_len, DMA_TO_DEVICE);
			tcph->check = csum_fold(csum);
			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
						   hdr_tb_len, DMA_TO_DEVICE);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_unmap:
	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	return ret;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */
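/*
 * iwl_trans_pcie_tx - transport entry point for transmitting a frame
 */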
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = &trans_pcie->txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_queue_space(txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring if there is no room */
		if (unlikely(iwl_queue_space(txq) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
	} else {
		tb1_len = len;
	}

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
	       IWL_FIRST_TB_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	if (amsdu) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					      out_meta, dev_cmd, tb1_len))) {
		goto out_err;
	}

	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_pcie_tfd_get_num_tbs(trans, tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout) {
			/*
			 * If the TXQ is active, arm the timer now; if not,
			 * store the timeout in the remainder so that the
			 * timer will be armed with the right value when the
			 * station wakes up.
			 */
			if (!txq->frozen)
				mod_timer(&txq->stuck_timer,
					  jiffies + txq->wd_timeout);
			else
				txq->frozen_expiry_remainder = txq->wd_timeout;
		}
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}