1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 * Copyright(c) 2018 - 2019 Intel Corporation 12 * 13 * This program is free software; you can redistribute it and/or modify it 14 * under the terms of version 2 of the GNU General Public License as 15 * published by the Free Software Foundation. 16 * 17 * This program is distributed in the hope that it will be useful, but WITHOUT 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 20 * more details. 21 * 22 * The full GNU General Public License is included in this distribution in the 23 * file called COPYING. 24 * 25 * Contact Information: 26 * Intel Linux Wireless <linuxwifi@intel.com> 27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 28 * 29 * BSD LICENSE 30 * 31 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 34 * Copyright(c) 2018 - 2019 Intel Corporation 35 * All rights reserved. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name Intel Corporation nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
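/*
 * Illustrative sketch only (not used by the driver): the wrap-around
 * arithmetic applied above, for a hypothetical ring whose size is a power
 * of two.  The name example_ring_space is made up for this illustration.
 */
static inline unsigned int example_ring_space(unsigned int write_ptr,
					      unsigned int read_ptr,
					      unsigned int size)
{
	/* subtraction modulo a power of two counts the used slots */
	unsigned int used = (write_ptr - read_ptr) & (size - 1);

	/* at least one slot stays unused so read == write means "empty" */
	return (size - 1) - used;
}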
/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_pcie_get_cmd_index is broken.
	 */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt,
					     int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->write_ptr].cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
275 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 276 } 277 278 /* 279 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 280 */ 281 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, 282 struct iwl_txq *txq) 283 { 284 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 285 u32 reg = 0; 286 int txq_id = txq->id; 287 288 lockdep_assert_held(&txq->lock); 289 290 /* 291 * explicitly wake up the NIC if: 292 * 1. shadow registers aren't enabled 293 * 2. NIC is woken up for CMD regardless of shadow outside this function 294 * 3. there is a chance that the NIC is asleep 295 */ 296 if (!trans->cfg->base_params->shadow_reg_enable && 297 txq_id != trans_pcie->cmd_queue && 298 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 299 /* 300 * wake up nic if it's powered down ... 301 * uCode will wake up, and interrupt us again, so next 302 * time we'll skip this part. 303 */ 304 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 305 306 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 307 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", 308 txq_id, reg); 309 iwl_set_bit(trans, CSR_GP_CNTRL, 310 BIT(trans->cfg->csr->flag_mac_access_req)); 311 txq->need_update = true; 312 return; 313 } 314 } 315 316 /* 317 * if not in power-save mode, uCode will never sleep when we're 318 * trying to tx (during RFKILL, we're not trying to tx). 319 */ 320 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); 321 if (!txq->block) 322 iwl_write32(trans, HBUS_TARG_WRPTR, 323 txq->write_ptr | (txq_id << 8)); 324 } 325 326 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 327 { 328 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 329 int i; 330 331 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 332 struct iwl_txq *txq = trans_pcie->txq[i]; 333 334 if (!test_bit(i, trans_pcie->queue_used)) 335 continue; 336 337 spin_lock_bh(&txq->lock); 338 if (txq->need_update) { 339 iwl_pcie_txq_inc_wr_ptr(trans, txq); 340 txq->need_update = false; 341 } 342 spin_unlock_bh(&txq->lock); 343 } 344 } 345 346 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, 347 void *_tfd, u8 idx) 348 { 349 350 if (trans->cfg->use_tfh) { 351 struct iwl_tfh_tfd *tfd = _tfd; 352 struct iwl_tfh_tb *tb = &tfd->tbs[idx]; 353 354 return (dma_addr_t)(le64_to_cpu(tb->addr)); 355 } else { 356 struct iwl_tfd *tfd = _tfd; 357 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 358 dma_addr_t addr = get_unaligned_le32(&tb->lo); 359 dma_addr_t hi_len; 360 361 if (sizeof(dma_addr_t) <= sizeof(u32)) 362 return addr; 363 364 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 365 366 /* 367 * shift by 16 twice to avoid warnings on 32-bit 368 * (where this code never runs anyway due to the 369 * if statement above) 370 */ 371 return addr | ((hi_len << 16) << 16); 372 } 373 } 374 375 static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, 376 u8 idx, dma_addr_t addr, u16 len) 377 { 378 struct iwl_tfd *tfd_fh = (void *)tfd; 379 struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; 380 381 u16 hi_n_len = len << 4; 382 383 put_unaligned_le32(addr, &tb->lo); 384 hi_n_len |= iwl_get_dma_hi_addr(addr); 385 386 tb->hi_n_len = cpu_to_le16(hi_n_len); 387 388 tfd_fh->num_tbs = idx + 1; 389 } 390 391 static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) 392 { 393 if (trans->cfg->use_tfh) { 394 struct iwl_tfh_tfd *tfd = _tfd; 395 396 return le16_to_cpu(tfd->num_tbs) & 0x1f; 397 } else { 398 struct iwl_tfd *tfd = _tfd; 399 400 return tfd->num_tbs & 0x1f; 401 } 
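	/*
	 * Worked example (illustration only; the values are hypothetical):
	 * for a 36-bit DMA address 0x3_8000_1000 and a 0x1f8-byte buffer,
	 * iwl_pcie_tfd_set_tb() above stores lo = 0x80001000 and
	 * hi_n_len = (0x1f8 << 4) | 0x3 = 0x1f83, i.e. the low 4 bits carry
	 * address bits 35:32 and the upper 12 bits carry the length.
	 * iwl_pcie_tfd_tb_get_addr() then rebuilds the address as
	 * 0x80001000 | ((0x3 << 16) << 16) = 0x3_8000_1000.
	 */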
402 } 403 404 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 405 struct iwl_cmd_meta *meta, 406 struct iwl_txq *txq, int index) 407 { 408 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 409 int i, num_tbs; 410 void *tfd = iwl_pcie_get_tfd(trans, txq, index); 411 412 /* Sanity check on number of chunks */ 413 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 414 415 if (num_tbs > trans_pcie->max_tbs) { 416 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 417 /* @todo issue fatal error, it is quite serious situation */ 418 return; 419 } 420 421 /* first TB is never freed - it's the bidirectional DMA data */ 422 423 for (i = 1; i < num_tbs; i++) { 424 if (meta->tbs & BIT(i)) 425 dma_unmap_page(trans->dev, 426 iwl_pcie_tfd_tb_get_addr(trans, tfd, i), 427 iwl_pcie_tfd_tb_get_len(trans, tfd, i), 428 DMA_TO_DEVICE); 429 else 430 dma_unmap_single(trans->dev, 431 iwl_pcie_tfd_tb_get_addr(trans, tfd, 432 i), 433 iwl_pcie_tfd_tb_get_len(trans, tfd, 434 i), 435 DMA_TO_DEVICE); 436 } 437 438 meta->tbs = 0; 439 440 if (trans->cfg->use_tfh) { 441 struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 442 443 tfd_fh->num_tbs = 0; 444 } else { 445 struct iwl_tfd *tfd_fh = (void *)tfd; 446 447 tfd_fh->num_tbs = 0; 448 } 449 450 } 451 452 /* 453 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 454 * @trans - transport private data 455 * @txq - tx queue 456 * @dma_dir - the direction of the DMA mapping 457 * 458 * Does NOT advance any TFD circular buffer read/write indexes 459 * Does NOT free the TFD itself (which is within circular buffer) 460 */ 461 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 462 { 463 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 464 * idx is bounded by n_window 465 */ 466 int rd_ptr = txq->read_ptr; 467 int idx = iwl_pcie_get_cmd_index(txq, rd_ptr); 468 469 lockdep_assert_held(&txq->lock); 470 471 /* We have only q->n_window txq->entries, but we use 472 * TFD_QUEUE_SIZE_MAX tfds 473 */ 474 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); 475 476 /* free SKB */ 477 if (txq->entries) { 478 struct sk_buff *skb; 479 480 skb = txq->entries[idx].skb; 481 482 /* Can be called from irqs-disabled context 483 * If skb is not NULL, it means that the whole queue is being 484 * freed and that the queue is not empty - free the skb 485 */ 486 if (skb) { 487 iwl_op_mode_free_skb(trans->op_mode, skb); 488 txq->entries[idx].skb = NULL; 489 } 490 } 491 } 492 493 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 494 dma_addr_t addr, u16 len, bool reset) 495 { 496 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 497 void *tfd; 498 u32 num_tbs; 499 500 tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; 501 502 if (reset) 503 memset(tfd, 0, trans_pcie->tfd_size); 504 505 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 506 507 /* Each TFD can point to a maximum max_tbs Tx buffers */ 508 if (num_tbs >= trans_pcie->max_tbs) { 509 IWL_ERR(trans, "Error can not send more than %d chunks\n", 510 trans_pcie->max_tbs); 511 return -EINVAL; 512 } 513 514 if (WARN(addr & ~IWL_TX_DMA_MASK, 515 "Unaligned address = %llx\n", (unsigned long long)addr)) 516 return -EINVAL; 517 518 iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); 519 520 return num_tbs; 521 } 522 523 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, 524 int slots_num, bool cmd_queue) 525 { 526 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 527 size_t tfd_sz = 
trans_pcie->tfd_size * 528 trans->cfg->base_params->max_tfd_queue_size; 529 size_t tb0_buf_sz; 530 int i; 531 532 if (WARN_ON(txq->entries || txq->tfds)) 533 return -EINVAL; 534 535 if (trans->cfg->use_tfh) 536 tfd_sz = trans_pcie->tfd_size * slots_num; 537 538 timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); 539 txq->trans_pcie = trans_pcie; 540 541 txq->n_window = slots_num; 542 543 txq->entries = kcalloc(slots_num, 544 sizeof(struct iwl_pcie_txq_entry), 545 GFP_KERNEL); 546 547 if (!txq->entries) 548 goto error; 549 550 if (cmd_queue) 551 for (i = 0; i < slots_num; i++) { 552 txq->entries[i].cmd = 553 kmalloc(sizeof(struct iwl_device_cmd), 554 GFP_KERNEL); 555 if (!txq->entries[i].cmd) 556 goto error; 557 } 558 559 /* Circular buffer of transmit frame descriptors (TFDs), 560 * shared with device */ 561 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 562 &txq->dma_addr, GFP_KERNEL); 563 if (!txq->tfds) 564 goto error; 565 566 BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); 567 568 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 569 570 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 571 &txq->first_tb_dma, 572 GFP_KERNEL); 573 if (!txq->first_tb_bufs) 574 goto err_free_tfds; 575 576 return 0; 577 err_free_tfds: 578 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 579 error: 580 if (txq->entries && cmd_queue) 581 for (i = 0; i < slots_num; i++) 582 kfree(txq->entries[i].cmd); 583 kfree(txq->entries); 584 txq->entries = NULL; 585 586 return -ENOMEM; 587 588 } 589 590 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 591 int slots_num, bool cmd_queue) 592 { 593 int ret; 594 u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size; 595 596 txq->need_update = false; 597 598 /* max_tfd_queue_size must be power-of-two size, otherwise 599 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
*/ 600 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), 601 "Max tfd queue size must be a power of two, but is %d", 602 tfd_queue_max_size)) 603 return -EINVAL; 604 605 /* Initialize queue's high/low-water marks, and head/tail indexes */ 606 ret = iwl_queue_init(txq, slots_num); 607 if (ret) 608 return ret; 609 610 spin_lock_init(&txq->lock); 611 612 if (cmd_queue) { 613 static struct lock_class_key iwl_pcie_cmd_queue_lock_class; 614 615 lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); 616 } 617 618 __skb_queue_head_init(&txq->overflow_q); 619 620 return 0; 621 } 622 623 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, 624 struct sk_buff *skb) 625 { 626 struct page **page_ptr; 627 628 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); 629 630 if (*page_ptr) { 631 __free_page(*page_ptr); 632 *page_ptr = NULL; 633 } 634 } 635 636 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 637 { 638 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 639 640 lockdep_assert_held(&trans_pcie->reg_lock); 641 642 if (!trans->cfg->base_params->apmg_wake_up_wa) 643 return; 644 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 645 return; 646 647 trans_pcie->cmd_hold_nic_awake = false; 648 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 649 BIT(trans->cfg->csr->flag_mac_access_req)); 650 } 651 652 /* 653 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 654 */ 655 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 656 { 657 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 658 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 659 660 spin_lock_bh(&txq->lock); 661 while (txq->write_ptr != txq->read_ptr) { 662 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 663 txq_id, txq->read_ptr); 664 665 if (txq_id != trans_pcie->cmd_queue) { 666 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 667 668 if (WARN_ON_ONCE(!skb)) 669 continue; 670 671 iwl_pcie_free_tso_page(trans_pcie, skb); 672 } 673 iwl_pcie_txq_free_tfd(trans, txq); 674 txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); 675 676 if (txq->read_ptr == txq->write_ptr) { 677 unsigned long flags; 678 679 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 680 if (txq_id == trans_pcie->cmd_queue) 681 iwl_pcie_clear_cmd_in_flight(trans); 682 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 683 } 684 } 685 686 while (!skb_queue_empty(&txq->overflow_q)) { 687 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 688 689 iwl_op_mode_free_skb(trans->op_mode, skb); 690 } 691 692 spin_unlock_bh(&txq->lock); 693 694 /* just in case - this queue may have been stopped */ 695 iwl_wake_queue(trans, txq); 696 } 697 698 /* 699 * iwl_pcie_txq_free - Deallocate DMA queue. 700 * @txq: Transmit queue to deallocate. 701 * 702 * Empty queue by removing and destroying all BD's. 703 * Free all buffers. 704 * 0-fill, but do not free "txq" descriptor structure. 
705 */ 706 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 707 { 708 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 709 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 710 struct device *dev = trans->dev; 711 int i; 712 713 if (WARN_ON(!txq)) 714 return; 715 716 iwl_pcie_txq_unmap(trans, txq_id); 717 718 /* De-alloc array of command/tx buffers */ 719 if (txq_id == trans_pcie->cmd_queue) 720 for (i = 0; i < txq->n_window; i++) { 721 kzfree(txq->entries[i].cmd); 722 kzfree(txq->entries[i].free_buf); 723 } 724 725 /* De-alloc circular buffer of TFDs */ 726 if (txq->tfds) { 727 dma_free_coherent(dev, 728 trans_pcie->tfd_size * 729 trans->cfg->base_params->max_tfd_queue_size, 730 txq->tfds, txq->dma_addr); 731 txq->dma_addr = 0; 732 txq->tfds = NULL; 733 734 dma_free_coherent(dev, 735 sizeof(*txq->first_tb_bufs) * txq->n_window, 736 txq->first_tb_bufs, txq->first_tb_dma); 737 } 738 739 kfree(txq->entries); 740 txq->entries = NULL; 741 742 del_timer_sync(&txq->stuck_timer); 743 744 /* 0-fill queue descriptor structure */ 745 memset(txq, 0, sizeof(*txq)); 746 } 747 748 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 749 { 750 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 751 int nq = trans->cfg->base_params->num_of_queues; 752 int chan; 753 u32 reg_val; 754 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 755 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 756 757 /* make sure all queue are not stopped/used */ 758 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 759 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 760 761 trans_pcie->scd_base_addr = 762 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 763 764 WARN_ON(scd_base_addr != 0 && 765 scd_base_addr != trans_pcie->scd_base_addr); 766 767 /* reset context data, TX status and translation data */ 768 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 769 SCD_CONTEXT_MEM_LOWER_BOUND, 770 NULL, clear_dwords); 771 772 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 773 trans_pcie->scd_bc_tbls.dma >> 10); 774 775 /* The chain extension of the SCD doesn't work well. This feature is 776 * enabled by default by the HW, so we need to disable it manually. 
777 */ 778 if (trans->cfg->base_params->scd_chain_ext_wa) 779 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 780 781 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 782 trans_pcie->cmd_fifo, 783 trans_pcie->cmd_q_wdg_timeout); 784 785 /* Activate all Tx DMA/FIFO channels */ 786 iwl_scd_activate_fifos(trans); 787 788 /* Enable DMA channel */ 789 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 790 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 791 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 792 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 793 794 /* Update FH chicken bits */ 795 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 796 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 797 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 798 799 /* Enable L1-Active */ 800 if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) 801 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 802 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 803 } 804 805 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 806 { 807 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 808 int txq_id; 809 810 /* 811 * we should never get here in gen2 trans mode return early to avoid 812 * having invalid accesses 813 */ 814 if (WARN_ON_ONCE(trans->cfg->gen2)) 815 return; 816 817 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 818 txq_id++) { 819 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 820 if (trans->cfg->use_tfh) 821 iwl_write_direct64(trans, 822 FH_MEM_CBBC_QUEUE(trans, txq_id), 823 txq->dma_addr); 824 else 825 iwl_write_direct32(trans, 826 FH_MEM_CBBC_QUEUE(trans, txq_id), 827 txq->dma_addr >> 8); 828 iwl_pcie_txq_unmap(trans, txq_id); 829 txq->read_ptr = 0; 830 txq->write_ptr = 0; 831 } 832 833 /* Tell NIC where to find the "keep warm" buffer */ 834 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 835 trans_pcie->kw.dma >> 4); 836 837 /* 838 * Send 0 as the scd_base_addr since the device may have be reset 839 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 840 * contain garbage. 841 */ 842 iwl_pcie_tx_start(trans, 0); 843 } 844 845 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 846 { 847 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 848 unsigned long flags; 849 int ch, ret; 850 u32 mask = 0; 851 852 spin_lock(&trans_pcie->irq_lock); 853 854 if (!iwl_trans_grab_nic_access(trans, &flags)) 855 goto out; 856 857 /* Stop each Tx DMA channel */ 858 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 859 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 860 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 861 } 862 863 /* Wait for DMA channels to be idle */ 864 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 865 if (ret < 0) 866 IWL_ERR(trans, 867 "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 868 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 869 870 iwl_trans_release_nic_access(trans, &flags); 871 872 out: 873 spin_unlock(&trans_pcie->irq_lock); 874 } 875 876 /* 877 * iwl_pcie_tx_stop - Stop all Tx DMA channels 878 */ 879 int iwl_pcie_tx_stop(struct iwl_trans *trans) 880 { 881 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 882 int txq_id; 883 884 /* Turn off all Tx DMA fifos */ 885 iwl_scd_deactivate_fifos(trans); 886 887 /* Turn off all Tx DMA channels */ 888 iwl_pcie_tx_stop_fh(trans); 889 890 /* 891 * This function can be called before the op_mode disabled the 892 * queues. This happens when we have an rfkill interrupt. 
893 * Since we stop Tx altogether - mark the queues as stopped. 894 */ 895 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); 896 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 897 898 /* This can happen: start_hw, stop_device */ 899 if (!trans_pcie->txq_memory) 900 return 0; 901 902 /* Unmap DMA from host system and free skb's */ 903 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 904 txq_id++) 905 iwl_pcie_txq_unmap(trans, txq_id); 906 907 return 0; 908 } 909 910 /* 911 * iwl_trans_tx_free - Free TXQ Context 912 * 913 * Destroy all TX DMA queues and structures 914 */ 915 void iwl_pcie_tx_free(struct iwl_trans *trans) 916 { 917 int txq_id; 918 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 919 920 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); 921 922 /* Tx queues */ 923 if (trans_pcie->txq_memory) { 924 for (txq_id = 0; 925 txq_id < trans->cfg->base_params->num_of_queues; 926 txq_id++) { 927 iwl_pcie_txq_free(trans, txq_id); 928 trans_pcie->txq[txq_id] = NULL; 929 } 930 } 931 932 kfree(trans_pcie->txq_memory); 933 trans_pcie->txq_memory = NULL; 934 935 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 936 937 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); 938 } 939 940 /* 941 * iwl_pcie_tx_alloc - allocate TX context 942 * Allocate all Tx DMA structures and initialize them 943 */ 944 static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 945 { 946 int ret; 947 int txq_id, slots_num; 948 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 949 u16 bc_tbls_size = trans->cfg->base_params->num_of_queues; 950 951 bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? 952 sizeof(struct iwl_gen3_bc_tbl) : 953 sizeof(struct iwlagn_scd_bc_tbl); 954 955 /*It is not allowed to alloc twice, so warn when this happens. 
956 * We cannot rely on the previous allocation, so free and fail */ 957 if (WARN_ON(trans_pcie->txq_memory)) { 958 ret = -EINVAL; 959 goto error; 960 } 961 962 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, 963 bc_tbls_size); 964 if (ret) { 965 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 966 goto error; 967 } 968 969 /* Alloc keep-warm buffer */ 970 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 971 if (ret) { 972 IWL_ERR(trans, "Keep Warm allocation failed\n"); 973 goto error; 974 } 975 976 trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, 977 sizeof(struct iwl_txq), GFP_KERNEL); 978 if (!trans_pcie->txq_memory) { 979 IWL_ERR(trans, "Not enough memory for txq\n"); 980 ret = -ENOMEM; 981 goto error; 982 } 983 984 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 985 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 986 txq_id++) { 987 bool cmd_queue = (txq_id == trans_pcie->cmd_queue); 988 989 if (cmd_queue) 990 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 991 trans->cfg->min_txq_size); 992 else 993 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 994 trans->cfg->min_256_ba_txq_size); 995 trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; 996 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], 997 slots_num, cmd_queue); 998 if (ret) { 999 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 1000 goto error; 1001 } 1002 trans_pcie->txq[txq_id]->id = txq_id; 1003 } 1004 1005 return 0; 1006 1007 error: 1008 iwl_pcie_tx_free(trans); 1009 1010 return ret; 1011 } 1012 1013 int iwl_pcie_tx_init(struct iwl_trans *trans) 1014 { 1015 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1016 int ret; 1017 int txq_id, slots_num; 1018 bool alloc = false; 1019 1020 if (!trans_pcie->txq_memory) { 1021 ret = iwl_pcie_tx_alloc(trans); 1022 if (ret) 1023 goto error; 1024 alloc = true; 1025 } 1026 1027 spin_lock(&trans_pcie->irq_lock); 1028 1029 /* Turn off all Tx DMA fifos */ 1030 iwl_scd_deactivate_fifos(trans); 1031 1032 /* Tell NIC where to find the "keep warm" buffer */ 1033 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 1034 trans_pcie->kw.dma >> 4); 1035 1036 spin_unlock(&trans_pcie->irq_lock); 1037 1038 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 1039 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; 1040 txq_id++) { 1041 bool cmd_queue = (txq_id == trans_pcie->cmd_queue); 1042 1043 if (cmd_queue) 1044 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 1045 trans->cfg->min_txq_size); 1046 else 1047 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 1048 trans->cfg->min_256_ba_txq_size); 1049 ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], 1050 slots_num, cmd_queue); 1051 if (ret) { 1052 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 1053 goto error; 1054 } 1055 1056 /* 1057 * Tell nic where to find circular buffer of TFDs for a 1058 * given Tx queue, and enable the DMA channel used for that 1059 * queue. 
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans_pcie->txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}
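/*
 * Illustrative sketch only (not used by the driver) of the "free until
 * index, not inclusive" semantics used by iwl_trans_pcie_reclaim() below:
 * the read pointer is advanced, modulo a power-of-two ring size, until it
 * reaches the target index, which itself is not freed.  The name
 * example_reclaim is made up for this illustration.
 */
static inline unsigned int example_reclaim(unsigned int read_ptr,
					   unsigned int target,
					   unsigned int size)
{
	unsigned int freed = 0;

	while (read_ptr != target) {
		/* the entry at read_ptr would be unmapped and freed here */
		read_ptr = (read_ptr + 1) & (size - 1);
		freed++;
	}

	return freed;
}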
/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans_pcie->queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used.
	 */
	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);

	if (!iwl_queue_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->cfg->use_tfh)
			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans_pcie->queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in the reclaim path, which is not
		 * re-entrant, so no one else will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx as well.
		 * Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans_pcie->dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_queue_space will be small again
			 * and we won't wake mac80211's queue.
1206 */ 1207 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); 1208 } 1209 1210 if (iwl_queue_space(trans, txq) > txq->low_mark) 1211 iwl_wake_queue(trans, txq); 1212 1213 spin_lock_bh(&txq->lock); 1214 txq->overflow_tx = false; 1215 } 1216 1217 out: 1218 spin_unlock_bh(&txq->lock); 1219 } 1220 1221 /* Set wr_ptr of specific device and txq */ 1222 void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) 1223 { 1224 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1225 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1226 1227 spin_lock_bh(&txq->lock); 1228 1229 txq->write_ptr = ptr; 1230 txq->read_ptr = txq->write_ptr; 1231 1232 spin_unlock_bh(&txq->lock); 1233 } 1234 1235 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, 1236 const struct iwl_host_cmd *cmd) 1237 { 1238 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1239 const struct iwl_cfg *cfg = trans->cfg; 1240 int ret; 1241 1242 lockdep_assert_held(&trans_pcie->reg_lock); 1243 1244 /* Make sure the NIC is still alive in the bus */ 1245 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 1246 return -ENODEV; 1247 1248 /* 1249 * wake up the NIC to make sure that the firmware will see the host 1250 * command - we will let the NIC sleep once all the host commands 1251 * returned. This needs to be done only on NICs that have 1252 * apmg_wake_up_wa set. 1253 */ 1254 if (cfg->base_params->apmg_wake_up_wa && 1255 !trans_pcie->cmd_hold_nic_awake) { 1256 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1257 BIT(cfg->csr->flag_mac_access_req)); 1258 1259 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1260 BIT(cfg->csr->flag_val_mac_access_en), 1261 (BIT(cfg->csr->flag_mac_clock_ready) | 1262 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 1263 15000); 1264 if (ret < 0) { 1265 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1266 BIT(cfg->csr->flag_mac_access_req)); 1267 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1268 return -EIO; 1269 } 1270 trans_pcie->cmd_hold_nic_awake = true; 1271 } 1272 1273 return 0; 1274 } 1275 1276 /* 1277 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1278 * 1279 * When FW advances 'R' index, all entries between old and new 'R' index 1280 * need to be reclaimed. As result, some free space forms. If there is 1281 * enough free space (> low mark), wake the stack that feeds us. 
1282 */ 1283 void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1284 { 1285 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1286 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1287 unsigned long flags; 1288 int nfreed = 0; 1289 u16 r; 1290 1291 lockdep_assert_held(&txq->lock); 1292 1293 idx = iwl_pcie_get_cmd_index(txq, idx); 1294 r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); 1295 1296 if (idx >= trans->cfg->base_params->max_tfd_queue_size || 1297 (!iwl_queue_used(txq, idx))) { 1298 WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), 1299 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1300 __func__, txq_id, idx, 1301 trans->cfg->base_params->max_tfd_queue_size, 1302 txq->write_ptr, txq->read_ptr); 1303 return; 1304 } 1305 1306 for (idx = iwl_queue_inc_wrap(trans, idx); r != idx; 1307 r = iwl_queue_inc_wrap(trans, r)) { 1308 txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); 1309 1310 if (nfreed++ > 0) { 1311 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1312 idx, txq->write_ptr, r); 1313 iwl_force_nmi(trans); 1314 } 1315 } 1316 1317 if (txq->read_ptr == txq->write_ptr) { 1318 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1319 iwl_pcie_clear_cmd_in_flight(trans); 1320 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1321 } 1322 1323 iwl_pcie_txq_progress(txq); 1324 } 1325 1326 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1327 u16 txq_id) 1328 { 1329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1330 u32 tbl_dw_addr; 1331 u32 tbl_dw; 1332 u16 scd_q2ratid; 1333 1334 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1335 1336 tbl_dw_addr = trans_pcie->scd_base_addr + 1337 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1338 1339 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1340 1341 if (txq_id & 0x1) 1342 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1343 else 1344 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1345 1346 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1347 1348 return 0; 1349 } 1350 1351 /* Receiver address (actually, Rx station's index into station table), 1352 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1353 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1354 1355 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1356 const struct iwl_trans_txq_scd_cfg *cfg, 1357 unsigned int wdg_timeout) 1358 { 1359 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1360 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1361 int fifo = -1; 1362 bool scd_bug = false; 1363 1364 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 1365 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1366 1367 txq->wd_timeout = msecs_to_jiffies(wdg_timeout); 1368 1369 if (cfg) { 1370 fifo = cfg->fifo; 1371 1372 /* Disable the scheduler prior configuring the cmd queue */ 1373 if (txq_id == trans_pcie->cmd_queue && 1374 trans_pcie->scd_set_active) 1375 iwl_scd_enable_set_active(trans, 0); 1376 1377 /* Stop this Tx queue before configuring it */ 1378 iwl_scd_txq_set_inactive(trans, txq_id); 1379 1380 /* Set this queue as a chain-building queue unless it is CMD */ 1381 if (txq_id != trans_pcie->cmd_queue) 1382 iwl_scd_txq_set_chain(trans, txq_id); 1383 1384 if (cfg->aggregate) { 1385 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); 1386 1387 /* Map receiver-address / traffic-ID to this queue */ 1388 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); 
1389 1390 /* enable aggregations for the queue */ 1391 iwl_scd_txq_enable_agg(trans, txq_id); 1392 txq->ampdu = true; 1393 } else { 1394 /* 1395 * disable aggregations for the queue, this will also 1396 * make the ra_tid mapping configuration irrelevant 1397 * since it is now a non-AGG queue. 1398 */ 1399 iwl_scd_txq_disable_agg(trans, txq_id); 1400 1401 ssn = txq->read_ptr; 1402 } 1403 } else { 1404 /* 1405 * If we need to move the SCD write pointer by steps of 1406 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let 1407 * the op_mode know by returning true later. 1408 * Do this only in case cfg is NULL since this trick can 1409 * be done only if we have DQA enabled which is true for mvm 1410 * only. And mvm never sets a cfg pointer. 1411 * This is really ugly, but this is the easiest way out for 1412 * this sad hardware issue. 1413 * This bug has been fixed on devices 9000 and up. 1414 */ 1415 scd_bug = !trans->cfg->mq_rx_supported && 1416 !((ssn - txq->write_ptr) & 0x3f) && 1417 (ssn != txq->write_ptr); 1418 if (scd_bug) 1419 ssn++; 1420 } 1421 1422 /* Place first TFD at index corresponding to start sequence number. 1423 * Assumes that ssn_idx is valid (!= 0xFFF) */ 1424 txq->read_ptr = (ssn & 0xff); 1425 txq->write_ptr = (ssn & 0xff); 1426 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1427 (ssn & 0xff) | (txq_id << 8)); 1428 1429 if (cfg) { 1430 u8 frame_limit = cfg->frame_limit; 1431 1432 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1433 1434 /* Set up Tx window size and frame limit for this queue */ 1435 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1436 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1437 iwl_trans_write_mem32(trans, 1438 trans_pcie->scd_base_addr + 1439 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1440 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | 1441 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); 1442 1443 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1444 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1445 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1446 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1447 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1448 SCD_QUEUE_STTS_REG_MSK); 1449 1450 /* enable the scheduler for this queue (only) */ 1451 if (txq_id == trans_pcie->cmd_queue && 1452 trans_pcie->scd_set_active) 1453 iwl_scd_enable_set_active(trans, BIT(txq_id)); 1454 1455 IWL_DEBUG_TX_QUEUES(trans, 1456 "Activate queue %d on FIFO %d WrPtr: %d\n", 1457 txq_id, fifo, ssn & 0xff); 1458 } else { 1459 IWL_DEBUG_TX_QUEUES(trans, 1460 "Activate queue %d WrPtr: %d\n", 1461 txq_id, ssn & 0xff); 1462 } 1463 1464 return scd_bug; 1465 } 1466 1467 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, 1468 bool shared_mode) 1469 { 1470 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1471 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1472 1473 txq->ampdu = !shared_mode; 1474 } 1475 1476 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1477 bool configure_scd) 1478 { 1479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1480 u32 stts_addr = trans_pcie->scd_base_addr + 1481 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1482 static const u32 zero_val[4] = {}; 1483 1484 trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; 1485 trans_pcie->txq[txq_id]->frozen = false; 1486 1487 /* 1488 * Upon HW Rfkill - we stop the device, and then stop the queues 1489 * in the op_mode. 
Just for the sake of the simplicity of the op_mode, 1490 * allow the op_mode to call txq_disable after it already called 1491 * stop_device. 1492 */ 1493 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 1494 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 1495 "queue %d not used", txq_id); 1496 return; 1497 } 1498 1499 if (configure_scd) { 1500 iwl_scd_txq_set_inactive(trans, txq_id); 1501 1502 iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, 1503 ARRAY_SIZE(zero_val)); 1504 } 1505 1506 iwl_pcie_txq_unmap(trans, txq_id); 1507 trans_pcie->txq[txq_id]->ampdu = false; 1508 1509 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1510 } 1511 1512 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1513 1514 /* 1515 * iwl_pcie_enqueue_hcmd - enqueue a uCode command 1516 * @priv: device private data point 1517 * @cmd: a pointer to the ucode command structure 1518 * 1519 * The function returns < 0 values to indicate the operation 1520 * failed. On success, it returns the index (>= 0) of command in the 1521 * command queue. 1522 */ 1523 static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1524 struct iwl_host_cmd *cmd) 1525 { 1526 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1527 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; 1528 struct iwl_device_cmd *out_cmd; 1529 struct iwl_cmd_meta *out_meta; 1530 unsigned long flags; 1531 void *dup_buf = NULL; 1532 dma_addr_t phys_addr; 1533 int idx; 1534 u16 copy_size, cmd_size, tb0_size; 1535 bool had_nocopy = false; 1536 u8 group_id = iwl_cmd_groupid(cmd->id); 1537 int i, ret; 1538 u32 cmd_pos; 1539 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1540 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1541 1542 if (WARN(!trans->wide_cmd_header && 1543 group_id > IWL_ALWAYS_LONG_GROUP, 1544 "unsupported wide command %#x\n", cmd->id)) 1545 return -EINVAL; 1546 1547 if (group_id != 0) { 1548 copy_size = sizeof(struct iwl_cmd_header_wide); 1549 cmd_size = sizeof(struct iwl_cmd_header_wide); 1550 } else { 1551 copy_size = sizeof(struct iwl_cmd_header); 1552 cmd_size = sizeof(struct iwl_cmd_header); 1553 } 1554 1555 /* need one for the header if the first is NOCOPY */ 1556 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1557 1558 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1559 cmddata[i] = cmd->data[i]; 1560 cmdlen[i] = cmd->len[i]; 1561 1562 if (!cmd->len[i]) 1563 continue; 1564 1565 /* need at least IWL_FIRST_TB_SIZE copied */ 1566 if (copy_size < IWL_FIRST_TB_SIZE) { 1567 int copy = IWL_FIRST_TB_SIZE - copy_size; 1568 1569 if (copy > cmdlen[i]) 1570 copy = cmdlen[i]; 1571 cmdlen[i] -= copy; 1572 cmddata[i] += copy; 1573 copy_size += copy; 1574 } 1575 1576 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1577 had_nocopy = true; 1578 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1579 idx = -EINVAL; 1580 goto free_dup_buf; 1581 } 1582 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1583 /* 1584 * This is also a chunk that isn't copied 1585 * to the static buffer so set had_nocopy. 1586 */ 1587 had_nocopy = true; 1588 1589 /* only allowed once */ 1590 if (WARN_ON(dup_buf)) { 1591 idx = -EINVAL; 1592 goto free_dup_buf; 1593 } 1594 1595 dup_buf = kmemdup(cmddata[i], cmdlen[i], 1596 GFP_ATOMIC); 1597 if (!dup_buf) 1598 return -ENOMEM; 1599 } else { 1600 /* NOCOPY must not be followed by normal! 
*/ 1601 if (WARN_ON(had_nocopy)) { 1602 idx = -EINVAL; 1603 goto free_dup_buf; 1604 } 1605 copy_size += cmdlen[i]; 1606 } 1607 cmd_size += cmd->len[i]; 1608 } 1609 1610 /* 1611 * If any of the command structures end up being larger than 1612 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1613 * allocated into separate TFDs, then we will need to 1614 * increase the size of the buffers. 1615 */ 1616 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1617 "Command %s (%#x) is too large (%d bytes)\n", 1618 iwl_get_cmd_string(trans, cmd->id), 1619 cmd->id, copy_size)) { 1620 idx = -EINVAL; 1621 goto free_dup_buf; 1622 } 1623 1624 spin_lock_bh(&txq->lock); 1625 1626 if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1627 spin_unlock_bh(&txq->lock); 1628 1629 IWL_ERR(trans, "No space in command queue\n"); 1630 iwl_op_mode_cmd_queue_full(trans->op_mode); 1631 idx = -ENOSPC; 1632 goto free_dup_buf; 1633 } 1634 1635 idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 1636 out_cmd = txq->entries[idx].cmd; 1637 out_meta = &txq->entries[idx].meta; 1638 1639 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 1640 if (cmd->flags & CMD_WANT_SKB) 1641 out_meta->source = cmd; 1642 1643 /* set up the header */ 1644 if (group_id != 0) { 1645 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1646 out_cmd->hdr_wide.group_id = group_id; 1647 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1648 out_cmd->hdr_wide.length = 1649 cpu_to_le16(cmd_size - 1650 sizeof(struct iwl_cmd_header_wide)); 1651 out_cmd->hdr_wide.reserved = 0; 1652 out_cmd->hdr_wide.sequence = 1653 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1654 INDEX_TO_SEQ(txq->write_ptr)); 1655 1656 cmd_pos = sizeof(struct iwl_cmd_header_wide); 1657 copy_size = sizeof(struct iwl_cmd_header_wide); 1658 } else { 1659 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1660 out_cmd->hdr.sequence = 1661 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | 1662 INDEX_TO_SEQ(txq->write_ptr)); 1663 out_cmd->hdr.group_id = 0; 1664 1665 cmd_pos = sizeof(struct iwl_cmd_header); 1666 copy_size = sizeof(struct iwl_cmd_header); 1667 } 1668 1669 /* and copy the data that needs to be copied */ 1670 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1671 int copy; 1672 1673 if (!cmd->len[i]) 1674 continue; 1675 1676 /* copy everything if not nocopy/dup */ 1677 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1678 IWL_HCMD_DFL_DUP))) { 1679 copy = cmd->len[i]; 1680 1681 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1682 cmd_pos += copy; 1683 copy_size += copy; 1684 continue; 1685 } 1686 1687 /* 1688 * Otherwise we need at least IWL_FIRST_TB_SIZE copied 1689 * in total (for bi-directional DMA), but copy up to what 1690 * we can fit into the payload for debug dump purposes. 
1691 */ 1692 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1693 1694 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1695 cmd_pos += copy; 1696 1697 /* However, treat copy_size the proper way, we need it below */ 1698 if (copy_size < IWL_FIRST_TB_SIZE) { 1699 copy = IWL_FIRST_TB_SIZE - copy_size; 1700 1701 if (copy > cmd->len[i]) 1702 copy = cmd->len[i]; 1703 copy_size += copy; 1704 } 1705 } 1706 1707 IWL_DEBUG_HC(trans, 1708 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1709 iwl_get_cmd_string(trans, cmd->id), 1710 group_id, out_cmd->hdr.cmd, 1711 le16_to_cpu(out_cmd->hdr.sequence), 1712 cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); 1713 1714 /* start the TFD with the minimum copy bytes */ 1715 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 1716 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1717 iwl_pcie_txq_build_tfd(trans, txq, 1718 iwl_pcie_get_first_tb_dma(txq, idx), 1719 tb0_size, true); 1720 1721 /* map first command fragment, if any remains */ 1722 if (copy_size > tb0_size) { 1723 phys_addr = dma_map_single(trans->dev, 1724 ((u8 *)&out_cmd->hdr) + tb0_size, 1725 copy_size - tb0_size, 1726 DMA_TO_DEVICE); 1727 if (dma_mapping_error(trans->dev, phys_addr)) { 1728 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1729 txq->write_ptr); 1730 idx = -ENOMEM; 1731 goto out; 1732 } 1733 1734 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1735 copy_size - tb0_size, false); 1736 } 1737 1738 /* map the remaining (adjusted) nocopy/dup fragments */ 1739 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1740 const void *data = cmddata[i]; 1741 1742 if (!cmdlen[i]) 1743 continue; 1744 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1745 IWL_HCMD_DFL_DUP))) 1746 continue; 1747 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1748 data = dup_buf; 1749 phys_addr = dma_map_single(trans->dev, (void *)data, 1750 cmdlen[i], DMA_TO_DEVICE); 1751 if (dma_mapping_error(trans->dev, phys_addr)) { 1752 iwl_pcie_tfd_unmap(trans, out_meta, txq, 1753 txq->write_ptr); 1754 idx = -ENOMEM; 1755 goto out; 1756 } 1757 1758 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1759 } 1760 1761 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); 1762 out_meta->flags = cmd->flags; 1763 if (WARN_ON_ONCE(txq->entries[idx].free_buf)) 1764 kzfree(txq->entries[idx].free_buf); 1765 txq->entries[idx].free_buf = dup_buf; 1766 1767 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); 1768 1769 /* start timer if queue currently empty */ 1770 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 1771 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1772 1773 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1774 ret = iwl_pcie_set_cmd_in_flight(trans, cmd); 1775 if (ret < 0) { 1776 idx = ret; 1777 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1778 goto out; 1779 } 1780 1781 /* Increment and update queue's write index */ 1782 txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); 1783 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1784 1785 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1786 1787 out: 1788 spin_unlock_bh(&txq->lock); 1789 free_dup_buf: 1790 if (idx < 0) 1791 kfree(dup_buf); 1792 return idx; 1793 } 1794 1795 /* 1796 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1797 * @rxb: Rx buffer to reclaim 1798 */ 1799 void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 1800 struct iwl_rx_cmd_buffer *rxb) 1801 { 1802 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1803 u16 
sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_pcie_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_pcie_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans_pcie->wait_command_queue);
	}

	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		set_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

	if (meta->flags & CMD_WAKE_UP_TRANS) {
		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		clear_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set.

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}
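
/*
 * The synchronous path below enqueues the command and then blocks:
 * it waits up to HOST_COMPLETE_TIMEOUT for the completion handler to
 * clear STATUS_SYNC_HCMD_ACTIVE, and turns timeouts, firmware errors,
 * RF-kill and missing responses into error codes for the caller.
 */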

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_trans_pcie_dump_regs(trans);
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}
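
/*
 * Illustrative only (not part of the driver): a caller, typically the
 * op-mode going through iwl_trans_send_cmd(), fills a struct iwl_host_cmd
 * roughly like this before it reaches iwl_trans_pcie_send_hcmd():
 *
 *	struct iwl_example_payload payload = {};	// hypothetical payload
 *	struct iwl_host_cmd hcmd = {
 *		.id = EXAMPLE_CMD_ID,		// hypothetical command ID
 *		.flags = CMD_WANT_SKB,		// sleep until the response arrives
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *
 * With CMD_WANT_SKB set, the response is returned in hcmd.resp_pkt and
 * must be released with iwl_free_resp() when the caller is done with it.
 */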

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb->data + hdr_len,
					head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb_frag_address(frag),
					skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);

	if (!p->page)
		goto alloc;

	/* enough room on this page */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
		return p;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	return p;
}

static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}
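
/*
 * Sketch of the A-MSDU built below: each subframe header is written into
 * the per-CPU TSO header page as
 *
 *	[pad to 4 bytes][DA (6)][SA (6)][length (2)][SNAP (8) + IP + TCP]
 *
 * and is followed by up to gso_size bytes of payload mapped straight from
 * the original skb. The 802.11 header + IV are pulled off the skb while
 * the subframes are built and pushed back afterwards.
 */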

static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* For testing on current hardware only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb)
				return -ENOMEM;

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
							htons(ETH_P_IPV6),
						    data_left);

			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
			skb_reset_transport_header(csum_skb);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
						 csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			if (trans_pcie->sw_csum_tx)
				skb_put_data(csum_skb, tso.data, size);

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
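
		/*
		 * With sw_csum_tx the TCP checksum is computed in software
		 * over csum_skb and patched into the subframe header that
		 * was already DMA-mapped above, hence the dma_sync_single
		 * calls around the write to tcph->check below.
		 */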

		/* For testing on early hardware only */
		if (trans_pcie->sw_csum_tx) {
			__wsum csum;

			csum = skb_checksum(csum_skb,
					    skb_checksum_start_offset(csum_skb),
					    csum_skb->len -
					    skb_checksum_start_offset(csum_skb),
					    0);
			dev_kfree_skb(csum_skb);
			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
						hdr_tb_len, DMA_TO_DEVICE);
			tcph->check = csum_fold(csum);
			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
						   hdr_tb_len, DMA_TO_DEVICE);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */
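
/*
 * iwl_trans_pcie_tx - queue one 802.11 frame on a TX queue
 *
 * Builds the TFD for the skb: TB0/TB1 carry the TX command and the
 * 802.11 header, the remaining TBs map the payload either directly or
 * as A-MSDU subframes. The byte-count table is then updated and the
 * write pointer advanced. If the queue is running low it is stopped,
 * and if there is no room at all the frame is parked on the overflow
 * queue instead.
 */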

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans_pcie->txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_queue_space(trans, txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;
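
	/*
	 * TFD layout used from here on: TB0 holds the first
	 * IWL_FIRST_TB_SIZE bytes of the device command (bi-directional
	 * scratch area), TB1 holds the rest of the TX command plus the
	 * 802.11 header, and the remaining TBs map the frame payload.
	 */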

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans, txq,
					      txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_pcie_tfd_get_num_tbs(trans, tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}