10cd1ad2dSMordechay Goodstein /****************************************************************************** 20cd1ad2dSMordechay Goodstein * 30cd1ad2dSMordechay Goodstein * This file is provided under a dual BSD/GPLv2 license. When using or 40cd1ad2dSMordechay Goodstein * redistributing this file, you may do so under either license. 50cd1ad2dSMordechay Goodstein * 60cd1ad2dSMordechay Goodstein * GPL LICENSE SUMMARY 70cd1ad2dSMordechay Goodstein * 80cd1ad2dSMordechay Goodstein * Copyright(c) 2020 Intel Corporation 90cd1ad2dSMordechay Goodstein * 100cd1ad2dSMordechay Goodstein * This program is free software; you can redistribute it and/or modify 110cd1ad2dSMordechay Goodstein * it under the terms of version 2 of the GNU General Public License as 120cd1ad2dSMordechay Goodstein * published by the Free Software Foundation. 130cd1ad2dSMordechay Goodstein * 140cd1ad2dSMordechay Goodstein * This program is distributed in the hope that it will be useful, but 150cd1ad2dSMordechay Goodstein * WITHOUT ANY WARRANTY; without even the implied warranty of 160cd1ad2dSMordechay Goodstein * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 170cd1ad2dSMordechay Goodstein * General Public License for more details. 180cd1ad2dSMordechay Goodstein * 190cd1ad2dSMordechay Goodstein * BSD LICENSE 200cd1ad2dSMordechay Goodstein * 210cd1ad2dSMordechay Goodstein * Copyright(c) 2020 Intel Corporation 220cd1ad2dSMordechay Goodstein * All rights reserved. 230cd1ad2dSMordechay Goodstein * 240cd1ad2dSMordechay Goodstein * Redistribution and use in source and binary forms, with or without 250cd1ad2dSMordechay Goodstein * modification, are permitted provided that the following conditions 260cd1ad2dSMordechay Goodstein * are met: 270cd1ad2dSMordechay Goodstein * 280cd1ad2dSMordechay Goodstein * * Redistributions of source code must retain the above copyright 290cd1ad2dSMordechay Goodstein * notice, this list of conditions and the following disclaimer. 
300cd1ad2dSMordechay Goodstein * * Redistributions in binary form must reproduce the above copyright 310cd1ad2dSMordechay Goodstein * notice, this list of conditions and the following disclaimer in 320cd1ad2dSMordechay Goodstein * the documentation and/or other materials provided with the 330cd1ad2dSMordechay Goodstein * distribution. 340cd1ad2dSMordechay Goodstein * * Neither the name Intel Corporation nor the names of its 350cd1ad2dSMordechay Goodstein * contributors may be used to endorse or promote products derived 360cd1ad2dSMordechay Goodstein * from this software without specific prior written permission. 370cd1ad2dSMordechay Goodstein * 380cd1ad2dSMordechay Goodstein * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 390cd1ad2dSMordechay Goodstein * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 400cd1ad2dSMordechay Goodstein * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 410cd1ad2dSMordechay Goodstein * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 420cd1ad2dSMordechay Goodstein * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 430cd1ad2dSMordechay Goodstein * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 440cd1ad2dSMordechay Goodstein * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 450cd1ad2dSMordechay Goodstein * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 460cd1ad2dSMordechay Goodstein * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 470cd1ad2dSMordechay Goodstein * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 480cd1ad2dSMordechay Goodstein * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *****************************************************************************/
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/tx.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>

/*
 * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
{
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
		/* not all queue slots are necessarily allocated */
		if (!trans->txqs.txq[txq_id])
			continue;
		iwl_txq_gen2_unmap(trans, txq_id);
	}
}

/*
 * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
 *
 * Writes the per-TFD byte count (and number of 64-byte fetch chunks) into
 * the scheduler byte-count table entry for the queue's current write
 * pointer.  The encoding differs by device family: AX210+ expects the raw
 * byte count (14 bits, chunk count in the top 2 bits), earlier gen2
 * devices expect the count in dwords (12 bits, chunk count in the top
 * 4 bits).
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	/* idx must fit inside the byte-count table window */
	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

/* Number of used TBs in a TFD; the count lives in the low 5 bits of num_tbs */
static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

/*
 * Unmap all DMA mappings of a gen2 TFD and mark it empty.
 *
 * meta->tbs records which TBs were mapped as pages (bit set) vs. single
 * mappings, so each TB is unmapped with the API matching how it was mapped.
 */
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	/* mark the TFD as having no TBs so it won't be unmapped again */
	tfd->num_tbs = 0;
}

/*
 * Free the TFD at the queue's read pointer: unmap its DMA buffers and,
 * if an skb is attached to the entry, free it and clear the slot.
 */
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
/*
 * Append a TB (DMA address + length) to a gen2 TFD.
 *
 * Returns the index of the newly added TB on success, or -EINVAL if the
 * TFD is already full.  The caller is responsible for the DMA mapping.
 */
int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/*
	 * Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	/* TB address may be unaligned within the TFD - write it unaligned */
	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

/*
 * Allocate a page used to work around the FH 4G-boundary hardware bug,
 * chaining it onto the skb's per-skb page list (stored in skb->cb) so it
 * is freed together with the skb.  Returns NULL on allocation failure.
 */
static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer to the previous page if there */
	*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
	*page_ptr = ret;

	return ret;
}
/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_tfh_tfd *tfd,
				       dma_addr_t phys, void *virt,
				       u16 len, struct iwl_cmd_meta *meta)
{
	dma_addr_t oldphys = phys;
	struct page *page;
	int ret;

	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;

	/* common case: no 4G-boundary crossing, add the TB directly */
	if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
		ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

		if (ret < 0)
			goto unmap;

		/* record page mapping so the unmap path uses dma_unmap_page */
		if (meta)
			meta->tbs |= BIT(ret);

		ret = 0;
		goto trace;
	}

	/*
	 * Work around a hardware bug. If (as expressed in the
	 * condition above) the TB ends on a 32-bit boundary,
	 * then the next TB may be accessed with the wrong
	 * address.
	 * To work around it, copy the data elsewhere and make
	 * a new mapping for it so the device will not fail.
	 */

	/* must leave room for the chaining pointer at the page end */
	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
		ret = -ENOBUFS;
		goto unmap;
	}

	page = get_workaround_page(trans, skb);
	if (!page) {
		ret = -ENOMEM;
		goto unmap;
	}

	memcpy(page_address(page), virt, len);

	phys = dma_map_single(trans->dev, page_address(page), len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;
	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
	if (ret < 0) {
		/* unmap the new allocation as single */
		oldphys = phys;
		meta = NULL;
		goto unmap;
	}
	IWL_WARN(trans,
		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
		 len, (unsigned long long)oldphys, (unsigned long long)phys);

	ret = 0;
unmap:
	/* meta selects page vs. single unmap, matching the original mapping */
	if (meta)
		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

	return ret;
}

#ifdef CONFIG_INET
/*
 * Get (or allocate) the per-CPU TSO header page with at least @len bytes
 * of room, attach it to the skb (via skb->cb) with a page reference, and
 * return it.  Returns NULL on allocation failure or if the skb already
 * has a page attached.
 */
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() in tx-gen2.c)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	/* skb holds a reference so the page lives as long as the skb */
	*page_ptr = p->page;
	get_page(p->page);
	return p;
}
#endif
Goodstein * trigger the 32-bit boundary hardware bug. 3720cd1ad2dSMordechay Goodstein * 3730cd1ad2dSMordechay Goodstein * (see also get_workaround_page() in tx-gen2.c) 3740cd1ad2dSMordechay Goodstein */ 3750cd1ad2dSMordechay Goodstein if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - 3760cd1ad2dSMordechay Goodstein sizeof(void *)) 3770cd1ad2dSMordechay Goodstein goto out; 3780cd1ad2dSMordechay Goodstein 3790cd1ad2dSMordechay Goodstein /* We don't have enough room on this page, get a new one. */ 3800cd1ad2dSMordechay Goodstein __free_page(p->page); 3810cd1ad2dSMordechay Goodstein 3820cd1ad2dSMordechay Goodstein alloc: 3830cd1ad2dSMordechay Goodstein p->page = alloc_page(GFP_ATOMIC); 3840cd1ad2dSMordechay Goodstein if (!p->page) 3850cd1ad2dSMordechay Goodstein return NULL; 3860cd1ad2dSMordechay Goodstein p->pos = page_address(p->page); 3870cd1ad2dSMordechay Goodstein /* set the chaining pointer to NULL */ 3880cd1ad2dSMordechay Goodstein *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; 3890cd1ad2dSMordechay Goodstein out: 3900cd1ad2dSMordechay Goodstein *page_ptr = p->page; 3910cd1ad2dSMordechay Goodstein get_page(p->page); 3920cd1ad2dSMordechay Goodstein return p; 3930cd1ad2dSMordechay Goodstein } 3940cd1ad2dSMordechay Goodstein #endif 3950cd1ad2dSMordechay Goodstein 3960cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, 3970cd1ad2dSMordechay Goodstein struct sk_buff *skb, 3980cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, int start_len, 3990cd1ad2dSMordechay Goodstein u8 hdr_len, 4000cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd) 4010cd1ad2dSMordechay Goodstein { 4020cd1ad2dSMordechay Goodstein #ifdef CONFIG_INET 4030cd1ad2dSMordechay Goodstein struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; 4040cd1ad2dSMordechay Goodstein struct ieee80211_hdr *hdr = (void *)skb->data; 4050cd1ad2dSMordechay Goodstein unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 
4060cd1ad2dSMordechay Goodstein unsigned int mss = skb_shinfo(skb)->gso_size; 4070cd1ad2dSMordechay Goodstein u16 length, amsdu_pad; 4080cd1ad2dSMordechay Goodstein u8 *start_hdr; 4090cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *hdr_page; 4100cd1ad2dSMordechay Goodstein struct tso_t tso; 4110cd1ad2dSMordechay Goodstein 4120cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), 4130cd1ad2dSMordechay Goodstein &dev_cmd->hdr, start_len, 0); 4140cd1ad2dSMordechay Goodstein 4150cd1ad2dSMordechay Goodstein ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 4160cd1ad2dSMordechay Goodstein snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 4170cd1ad2dSMordechay Goodstein total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; 4180cd1ad2dSMordechay Goodstein amsdu_pad = 0; 4190cd1ad2dSMordechay Goodstein 4200cd1ad2dSMordechay Goodstein /* total amount of header we may need for this A-MSDU */ 4210cd1ad2dSMordechay Goodstein hdr_room = DIV_ROUND_UP(total_len, mss) * 4220cd1ad2dSMordechay Goodstein (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); 4230cd1ad2dSMordechay Goodstein 4240cd1ad2dSMordechay Goodstein /* Our device supports 9 segments at most, it will fit in 1 page */ 4250cd1ad2dSMordechay Goodstein hdr_page = get_page_hdr(trans, hdr_room, skb); 4260cd1ad2dSMordechay Goodstein if (!hdr_page) 4270cd1ad2dSMordechay Goodstein return -ENOMEM; 4280cd1ad2dSMordechay Goodstein 4290cd1ad2dSMordechay Goodstein start_hdr = hdr_page->pos; 4300cd1ad2dSMordechay Goodstein 4310cd1ad2dSMordechay Goodstein /* 4320cd1ad2dSMordechay Goodstein * Pull the ieee80211 header to be able to use TSO core, 4330cd1ad2dSMordechay Goodstein * we will restore it for the tx_status flow. 
4340cd1ad2dSMordechay Goodstein */ 4350cd1ad2dSMordechay Goodstein skb_pull(skb, hdr_len); 4360cd1ad2dSMordechay Goodstein 4370cd1ad2dSMordechay Goodstein /* 4380cd1ad2dSMordechay Goodstein * Remove the length of all the headers that we don't actually 4390cd1ad2dSMordechay Goodstein * have in the MPDU by themselves, but that we duplicate into 4400cd1ad2dSMordechay Goodstein * all the different MSDUs inside the A-MSDU. 4410cd1ad2dSMordechay Goodstein */ 4420cd1ad2dSMordechay Goodstein le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 4430cd1ad2dSMordechay Goodstein 4440cd1ad2dSMordechay Goodstein tso_start(skb, &tso); 4450cd1ad2dSMordechay Goodstein 4460cd1ad2dSMordechay Goodstein while (total_len) { 4470cd1ad2dSMordechay Goodstein /* this is the data left for this subframe */ 4480cd1ad2dSMordechay Goodstein unsigned int data_left = min_t(unsigned int, mss, total_len); 4490cd1ad2dSMordechay Goodstein struct sk_buff *csum_skb = NULL; 4500cd1ad2dSMordechay Goodstein unsigned int tb_len; 4510cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 4520cd1ad2dSMordechay Goodstein u8 *subf_hdrs_start = hdr_page->pos; 4530cd1ad2dSMordechay Goodstein 4540cd1ad2dSMordechay Goodstein total_len -= data_left; 4550cd1ad2dSMordechay Goodstein 4560cd1ad2dSMordechay Goodstein memset(hdr_page->pos, 0, amsdu_pad); 4570cd1ad2dSMordechay Goodstein hdr_page->pos += amsdu_pad; 4580cd1ad2dSMordechay Goodstein amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 4590cd1ad2dSMordechay Goodstein data_left)) & 0x3; 4600cd1ad2dSMordechay Goodstein ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 4610cd1ad2dSMordechay Goodstein hdr_page->pos += ETH_ALEN; 4620cd1ad2dSMordechay Goodstein ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 4630cd1ad2dSMordechay Goodstein hdr_page->pos += ETH_ALEN; 4640cd1ad2dSMordechay Goodstein 4650cd1ad2dSMordechay Goodstein length = snap_ip_tcp_hdrlen + data_left; 4660cd1ad2dSMordechay Goodstein *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 
4670cd1ad2dSMordechay Goodstein hdr_page->pos += sizeof(length); 4680cd1ad2dSMordechay Goodstein 4690cd1ad2dSMordechay Goodstein /* 4700cd1ad2dSMordechay Goodstein * This will copy the SNAP as well which will be considered 4710cd1ad2dSMordechay Goodstein * as MAC header. 4720cd1ad2dSMordechay Goodstein */ 4730cd1ad2dSMordechay Goodstein tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 4740cd1ad2dSMordechay Goodstein 4750cd1ad2dSMordechay Goodstein hdr_page->pos += snap_ip_tcp_hdrlen; 4760cd1ad2dSMordechay Goodstein 4770cd1ad2dSMordechay Goodstein tb_len = hdr_page->pos - start_hdr; 4780cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, start_hdr, 4790cd1ad2dSMordechay Goodstein tb_len, DMA_TO_DEVICE); 4800cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 4810cd1ad2dSMordechay Goodstein dev_kfree_skb(csum_skb); 4820cd1ad2dSMordechay Goodstein goto out_err; 4830cd1ad2dSMordechay Goodstein } 4840cd1ad2dSMordechay Goodstein /* 4850cd1ad2dSMordechay Goodstein * No need for _with_wa, this is from the TSO page and 4860cd1ad2dSMordechay Goodstein * we leave some space at the end of it so can't hit 4870cd1ad2dSMordechay Goodstein * the buggy scenario. 
4880cd1ad2dSMordechay Goodstein */ 4890cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); 4900cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 4910cd1ad2dSMordechay Goodstein tb_phys, tb_len); 4920cd1ad2dSMordechay Goodstein /* add this subframe's headers' length to the tx_cmd */ 4930cd1ad2dSMordechay Goodstein le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 4940cd1ad2dSMordechay Goodstein 4950cd1ad2dSMordechay Goodstein /* prepare the start_hdr for the next subframe */ 4960cd1ad2dSMordechay Goodstein start_hdr = hdr_page->pos; 4970cd1ad2dSMordechay Goodstein 4980cd1ad2dSMordechay Goodstein /* put the payload */ 4990cd1ad2dSMordechay Goodstein while (data_left) { 5000cd1ad2dSMordechay Goodstein int ret; 5010cd1ad2dSMordechay Goodstein 5020cd1ad2dSMordechay Goodstein tb_len = min_t(unsigned int, tso.size, data_left); 5030cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, tso.data, 5040cd1ad2dSMordechay Goodstein tb_len, DMA_TO_DEVICE); 5050cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, 5060cd1ad2dSMordechay Goodstein tb_phys, tso.data, 5070cd1ad2dSMordechay Goodstein tb_len, NULL); 5080cd1ad2dSMordechay Goodstein if (ret) { 5090cd1ad2dSMordechay Goodstein dev_kfree_skb(csum_skb); 5100cd1ad2dSMordechay Goodstein goto out_err; 5110cd1ad2dSMordechay Goodstein } 5120cd1ad2dSMordechay Goodstein 5130cd1ad2dSMordechay Goodstein data_left -= tb_len; 5140cd1ad2dSMordechay Goodstein tso_build_data(skb, &tso, tb_len); 5150cd1ad2dSMordechay Goodstein } 5160cd1ad2dSMordechay Goodstein } 5170cd1ad2dSMordechay Goodstein 5180cd1ad2dSMordechay Goodstein /* re -add the WiFi header */ 5190cd1ad2dSMordechay Goodstein skb_push(skb, hdr_len); 5200cd1ad2dSMordechay Goodstein 5210cd1ad2dSMordechay Goodstein return 0; 5220cd1ad2dSMordechay Goodstein 5230cd1ad2dSMordechay Goodstein out_err: 5240cd1ad2dSMordechay Goodstein #endif 5250cd1ad2dSMordechay Goodstein return -EINVAL; 
/*
 * Build a complete gen2 TFD for an A-MSDU frame: TB0 (the first-TB scratch
 * buffer), TB1 (remainder of the TX command plus 802.11 header), then the
 * per-subframe header/payload TBs via iwl_txq_gen2_build_amsdu().
 *
 * Returns the filled TFD on success, or NULL on failure (with any TBs
 * already added unmapped again).
 */
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	/* undo any mappings already recorded in the TFD */
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
5710cd1ad2dSMordechay Goodstein */ 5720cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); 5730cd1ad2dSMordechay Goodstein 5740cd1ad2dSMordechay Goodstein if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, 5750cd1ad2dSMordechay Goodstein hdr_len, dev_cmd)) 5760cd1ad2dSMordechay Goodstein goto out_err; 5770cd1ad2dSMordechay Goodstein 5780cd1ad2dSMordechay Goodstein /* building the A-MSDU might have changed this data, memcpy it now */ 5790cd1ad2dSMordechay Goodstein memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); 5800cd1ad2dSMordechay Goodstein return tfd; 5810cd1ad2dSMordechay Goodstein 5820cd1ad2dSMordechay Goodstein out_err: 5830cd1ad2dSMordechay Goodstein iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); 5840cd1ad2dSMordechay Goodstein return NULL; 5850cd1ad2dSMordechay Goodstein } 5860cd1ad2dSMordechay Goodstein 5870cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, 5880cd1ad2dSMordechay Goodstein struct sk_buff *skb, 5890cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, 5900cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta) 5910cd1ad2dSMordechay Goodstein { 5920cd1ad2dSMordechay Goodstein int i; 5930cd1ad2dSMordechay Goodstein 5940cd1ad2dSMordechay Goodstein for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5950cd1ad2dSMordechay Goodstein const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5960cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 5970cd1ad2dSMordechay Goodstein unsigned int fragsz = skb_frag_size(frag); 5980cd1ad2dSMordechay Goodstein int ret; 5990cd1ad2dSMordechay Goodstein 6000cd1ad2dSMordechay Goodstein if (!fragsz) 6010cd1ad2dSMordechay Goodstein continue; 6020cd1ad2dSMordechay Goodstein 6030cd1ad2dSMordechay Goodstein tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 6040cd1ad2dSMordechay Goodstein fragsz, DMA_TO_DEVICE); 6050cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, 6060cd1ad2dSMordechay Goodstein 
skb_frag_address(frag), 6070cd1ad2dSMordechay Goodstein fragsz, out_meta); 6080cd1ad2dSMordechay Goodstein if (ret) 6090cd1ad2dSMordechay Goodstein return ret; 6100cd1ad2dSMordechay Goodstein } 6110cd1ad2dSMordechay Goodstein 6120cd1ad2dSMordechay Goodstein return 0; 6130cd1ad2dSMordechay Goodstein } 6140cd1ad2dSMordechay Goodstein 6150cd1ad2dSMordechay Goodstein static struct 6160cd1ad2dSMordechay Goodstein iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, 6170cd1ad2dSMordechay Goodstein struct iwl_txq *txq, 6180cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, 6190cd1ad2dSMordechay Goodstein struct sk_buff *skb, 6200cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta, 6210cd1ad2dSMordechay Goodstein int hdr_len, 6220cd1ad2dSMordechay Goodstein int tx_cmd_len, 6230cd1ad2dSMordechay Goodstein bool pad) 6240cd1ad2dSMordechay Goodstein { 6250cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 6260cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 6270cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 6280cd1ad2dSMordechay Goodstein int len, tb1_len, tb2_len; 6290cd1ad2dSMordechay Goodstein void *tb1_addr; 6300cd1ad2dSMordechay Goodstein struct sk_buff *frag; 6310cd1ad2dSMordechay Goodstein 6320cd1ad2dSMordechay Goodstein tb_phys = iwl_txq_get_first_tb_dma(txq, idx); 6330cd1ad2dSMordechay Goodstein 6340cd1ad2dSMordechay Goodstein /* The first TB points to bi-directional DMA data */ 6350cd1ad2dSMordechay Goodstein memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); 6360cd1ad2dSMordechay Goodstein 6370cd1ad2dSMordechay Goodstein /* 6380cd1ad2dSMordechay Goodstein * No need for _with_wa, the first TB allocation is aligned up 6390cd1ad2dSMordechay Goodstein * to a 64-byte boundary and thus can't be at the end or cross 6400cd1ad2dSMordechay Goodstein * a page boundary (much less a 2^32 boundary). 
6410cd1ad2dSMordechay Goodstein */ 6420cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); 6430cd1ad2dSMordechay Goodstein 6440cd1ad2dSMordechay Goodstein /* 6450cd1ad2dSMordechay Goodstein * The second TB (tb1) points to the remainder of the TX command 6460cd1ad2dSMordechay Goodstein * and the 802.11 header - dword aligned size 6470cd1ad2dSMordechay Goodstein * (This calculation modifies the TX command, so do it before the 6480cd1ad2dSMordechay Goodstein * setup of the first TB) 6490cd1ad2dSMordechay Goodstein */ 6500cd1ad2dSMordechay Goodstein len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - 6510cd1ad2dSMordechay Goodstein IWL_FIRST_TB_SIZE; 6520cd1ad2dSMordechay Goodstein 6530cd1ad2dSMordechay Goodstein if (pad) 6540cd1ad2dSMordechay Goodstein tb1_len = ALIGN(len, 4); 6550cd1ad2dSMordechay Goodstein else 6560cd1ad2dSMordechay Goodstein tb1_len = len; 6570cd1ad2dSMordechay Goodstein 6580cd1ad2dSMordechay Goodstein /* map the data for TB1 */ 6590cd1ad2dSMordechay Goodstein tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 6600cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 6610cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 6620cd1ad2dSMordechay Goodstein goto out_err; 6630cd1ad2dSMordechay Goodstein /* 6640cd1ad2dSMordechay Goodstein * No need for _with_wa(), we ensure (via alignment) that the data 6650cd1ad2dSMordechay Goodstein * here can never cross or end at a page boundary. 
6660cd1ad2dSMordechay Goodstein */ 6670cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); 6680cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, 6690cd1ad2dSMordechay Goodstein IWL_FIRST_TB_SIZE + tb1_len, hdr_len); 6700cd1ad2dSMordechay Goodstein 6710cd1ad2dSMordechay Goodstein /* set up TFD's third entry to point to remainder of skb's head */ 6720cd1ad2dSMordechay Goodstein tb2_len = skb_headlen(skb) - hdr_len; 6730cd1ad2dSMordechay Goodstein 6740cd1ad2dSMordechay Goodstein if (tb2_len > 0) { 6750cd1ad2dSMordechay Goodstein int ret; 6760cd1ad2dSMordechay Goodstein 6770cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, 6780cd1ad2dSMordechay Goodstein tb2_len, DMA_TO_DEVICE); 6790cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, 6800cd1ad2dSMordechay Goodstein skb->data + hdr_len, tb2_len, 6810cd1ad2dSMordechay Goodstein NULL); 6820cd1ad2dSMordechay Goodstein if (ret) 6830cd1ad2dSMordechay Goodstein goto out_err; 6840cd1ad2dSMordechay Goodstein } 6850cd1ad2dSMordechay Goodstein 6860cd1ad2dSMordechay Goodstein if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) 6870cd1ad2dSMordechay Goodstein goto out_err; 6880cd1ad2dSMordechay Goodstein 6890cd1ad2dSMordechay Goodstein skb_walk_frags(skb, frag) { 6900cd1ad2dSMordechay Goodstein int ret; 6910cd1ad2dSMordechay Goodstein 6920cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, frag->data, 6930cd1ad2dSMordechay Goodstein skb_headlen(frag), DMA_TO_DEVICE); 6940cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, 6950cd1ad2dSMordechay Goodstein frag->data, 6960cd1ad2dSMordechay Goodstein skb_headlen(frag), NULL); 6970cd1ad2dSMordechay Goodstein if (ret) 6980cd1ad2dSMordechay Goodstein goto out_err; 6990cd1ad2dSMordechay Goodstein if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) 7000cd1ad2dSMordechay Goodstein 
goto out_err; 7010cd1ad2dSMordechay Goodstein } 7020cd1ad2dSMordechay Goodstein 7030cd1ad2dSMordechay Goodstein return tfd; 7040cd1ad2dSMordechay Goodstein 7050cd1ad2dSMordechay Goodstein out_err: 7060cd1ad2dSMordechay Goodstein iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); 7070cd1ad2dSMordechay Goodstein return NULL; 7080cd1ad2dSMordechay Goodstein } 7090cd1ad2dSMordechay Goodstein 7100cd1ad2dSMordechay Goodstein static 7110cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, 7120cd1ad2dSMordechay Goodstein struct iwl_txq *txq, 7130cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, 7140cd1ad2dSMordechay Goodstein struct sk_buff *skb, 7150cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta) 7160cd1ad2dSMordechay Goodstein { 7170cd1ad2dSMordechay Goodstein struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 7180cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 7190cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 7200cd1ad2dSMordechay Goodstein int len, hdr_len; 7210cd1ad2dSMordechay Goodstein bool amsdu; 7220cd1ad2dSMordechay Goodstein 7230cd1ad2dSMordechay Goodstein /* There must be data left over for TB1 or this code must be changed */ 7240cd1ad2dSMordechay Goodstein BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); 7250cd1ad2dSMordechay Goodstein 7260cd1ad2dSMordechay Goodstein memset(tfd, 0, sizeof(*tfd)); 7270cd1ad2dSMordechay Goodstein 7280cd1ad2dSMordechay Goodstein if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) 7290cd1ad2dSMordechay Goodstein len = sizeof(struct iwl_tx_cmd_gen2); 7300cd1ad2dSMordechay Goodstein else 7310cd1ad2dSMordechay Goodstein len = sizeof(struct iwl_tx_cmd_gen3); 7320cd1ad2dSMordechay Goodstein 7330cd1ad2dSMordechay Goodstein amsdu = ieee80211_is_data_qos(hdr->frame_control) && 7340cd1ad2dSMordechay Goodstein (*ieee80211_get_qos_ctl(hdr) & 7350cd1ad2dSMordechay 
Goodstein IEEE80211_QOS_CTL_A_MSDU_PRESENT); 7360cd1ad2dSMordechay Goodstein 7370cd1ad2dSMordechay Goodstein hdr_len = ieee80211_hdrlen(hdr->frame_control); 7380cd1ad2dSMordechay Goodstein 7390cd1ad2dSMordechay Goodstein /* 7400cd1ad2dSMordechay Goodstein * Only build A-MSDUs here if doing so by GSO, otherwise it may be 7410cd1ad2dSMordechay Goodstein * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been 7420cd1ad2dSMordechay Goodstein * built in the higher layers already. 7430cd1ad2dSMordechay Goodstein */ 7440cd1ad2dSMordechay Goodstein if (amsdu && skb_shinfo(skb)->gso_size) 7450cd1ad2dSMordechay Goodstein return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, 7460cd1ad2dSMordechay Goodstein out_meta, hdr_len, len); 7470cd1ad2dSMordechay Goodstein return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, 7480cd1ad2dSMordechay Goodstein hdr_len, len, !amsdu); 7490cd1ad2dSMordechay Goodstein } 7500cd1ad2dSMordechay Goodstein 7510cd1ad2dSMordechay Goodstein int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) 7520cd1ad2dSMordechay Goodstein { 7530cd1ad2dSMordechay Goodstein unsigned int max; 7540cd1ad2dSMordechay Goodstein unsigned int used; 7550cd1ad2dSMordechay Goodstein 7560cd1ad2dSMordechay Goodstein /* 7570cd1ad2dSMordechay Goodstein * To avoid ambiguity between empty and completely full queues, there 7580cd1ad2dSMordechay Goodstein * should always be less than max_tfd_queue_size elements in the queue. 7590cd1ad2dSMordechay Goodstein * If q->n_window is smaller than max_tfd_queue_size, there is no need 7600cd1ad2dSMordechay Goodstein * to reserve any queue entries for this purpose. 
7610cd1ad2dSMordechay Goodstein */ 7620cd1ad2dSMordechay Goodstein if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) 7630cd1ad2dSMordechay Goodstein max = q->n_window; 7640cd1ad2dSMordechay Goodstein else 7650cd1ad2dSMordechay Goodstein max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; 7660cd1ad2dSMordechay Goodstein 7670cd1ad2dSMordechay Goodstein /* 7680cd1ad2dSMordechay Goodstein * max_tfd_queue_size is a power of 2, so the following is equivalent to 7690cd1ad2dSMordechay Goodstein * modulo by max_tfd_queue_size and is well defined. 7700cd1ad2dSMordechay Goodstein */ 7710cd1ad2dSMordechay Goodstein used = (q->write_ptr - q->read_ptr) & 7720cd1ad2dSMordechay Goodstein (trans->trans_cfg->base_params->max_tfd_queue_size - 1); 7730cd1ad2dSMordechay Goodstein 7740cd1ad2dSMordechay Goodstein if (WARN_ON(used > max)) 7750cd1ad2dSMordechay Goodstein return 0; 7760cd1ad2dSMordechay Goodstein 7770cd1ad2dSMordechay Goodstein return max - used; 7780cd1ad2dSMordechay Goodstein } 7790cd1ad2dSMordechay Goodstein 7800cd1ad2dSMordechay Goodstein int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, 7810cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, int txq_id) 7820cd1ad2dSMordechay Goodstein { 7830cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta; 7840cd1ad2dSMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[txq_id]; 7850cd1ad2dSMordechay Goodstein u16 cmd_len; 7860cd1ad2dSMordechay Goodstein int idx; 7870cd1ad2dSMordechay Goodstein void *tfd; 7880cd1ad2dSMordechay Goodstein 7890cd1ad2dSMordechay Goodstein if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, 7900cd1ad2dSMordechay Goodstein "queue %d out of range", txq_id)) 7910cd1ad2dSMordechay Goodstein return -EINVAL; 7920cd1ad2dSMordechay Goodstein 7930cd1ad2dSMordechay Goodstein if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), 7940cd1ad2dSMordechay Goodstein "TX on unused queue %d\n", txq_id)) 7950cd1ad2dSMordechay Goodstein return -EINVAL; 
7960cd1ad2dSMordechay Goodstein 7970cd1ad2dSMordechay Goodstein if (skb_is_nonlinear(skb) && 7980cd1ad2dSMordechay Goodstein skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && 7990cd1ad2dSMordechay Goodstein __skb_linearize(skb)) 8000cd1ad2dSMordechay Goodstein return -ENOMEM; 8010cd1ad2dSMordechay Goodstein 8020cd1ad2dSMordechay Goodstein spin_lock(&txq->lock); 8030cd1ad2dSMordechay Goodstein 8040cd1ad2dSMordechay Goodstein if (iwl_txq_space(trans, txq) < txq->high_mark) { 8050cd1ad2dSMordechay Goodstein iwl_txq_stop(trans, txq); 8060cd1ad2dSMordechay Goodstein 8070cd1ad2dSMordechay Goodstein /* don't put the packet on the ring, if there is no room */ 8080cd1ad2dSMordechay Goodstein if (unlikely(iwl_txq_space(trans, txq) < 3)) { 8090cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd **dev_cmd_ptr; 8100cd1ad2dSMordechay Goodstein 8110cd1ad2dSMordechay Goodstein dev_cmd_ptr = (void *)((u8 *)skb->cb + 8120cd1ad2dSMordechay Goodstein trans->txqs.dev_cmd_offs); 8130cd1ad2dSMordechay Goodstein 8140cd1ad2dSMordechay Goodstein *dev_cmd_ptr = dev_cmd; 8150cd1ad2dSMordechay Goodstein __skb_queue_tail(&txq->overflow_q, skb); 8160cd1ad2dSMordechay Goodstein spin_unlock(&txq->lock); 8170cd1ad2dSMordechay Goodstein return 0; 8180cd1ad2dSMordechay Goodstein } 8190cd1ad2dSMordechay Goodstein } 8200cd1ad2dSMordechay Goodstein 8210cd1ad2dSMordechay Goodstein idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 8220cd1ad2dSMordechay Goodstein 8230cd1ad2dSMordechay Goodstein /* Set up driver data for this TFD */ 8240cd1ad2dSMordechay Goodstein txq->entries[idx].skb = skb; 8250cd1ad2dSMordechay Goodstein txq->entries[idx].cmd = dev_cmd; 8260cd1ad2dSMordechay Goodstein 8270cd1ad2dSMordechay Goodstein dev_cmd->hdr.sequence = 8280cd1ad2dSMordechay Goodstein cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 8290cd1ad2dSMordechay Goodstein INDEX_TO_SEQ(idx))); 8300cd1ad2dSMordechay Goodstein 8310cd1ad2dSMordechay Goodstein /* Set up first empty entry in queue's array of Tx/cmd buffers */ 
8320cd1ad2dSMordechay Goodstein out_meta = &txq->entries[idx].meta; 8330cd1ad2dSMordechay Goodstein out_meta->flags = 0; 8340cd1ad2dSMordechay Goodstein 8350cd1ad2dSMordechay Goodstein tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); 8360cd1ad2dSMordechay Goodstein if (!tfd) { 8370cd1ad2dSMordechay Goodstein spin_unlock(&txq->lock); 8380cd1ad2dSMordechay Goodstein return -1; 8390cd1ad2dSMordechay Goodstein } 8400cd1ad2dSMordechay Goodstein 8410cd1ad2dSMordechay Goodstein if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 8420cd1ad2dSMordechay Goodstein struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = 8430cd1ad2dSMordechay Goodstein (void *)dev_cmd->payload; 8440cd1ad2dSMordechay Goodstein 8450cd1ad2dSMordechay Goodstein cmd_len = le16_to_cpu(tx_cmd_gen3->len); 8460cd1ad2dSMordechay Goodstein } else { 8470cd1ad2dSMordechay Goodstein struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = 8480cd1ad2dSMordechay Goodstein (void *)dev_cmd->payload; 8490cd1ad2dSMordechay Goodstein 8500cd1ad2dSMordechay Goodstein cmd_len = le16_to_cpu(tx_cmd_gen2->len); 8510cd1ad2dSMordechay Goodstein } 8520cd1ad2dSMordechay Goodstein 8530cd1ad2dSMordechay Goodstein /* Set up entry for this TFD in Tx byte-count array */ 8540cd1ad2dSMordechay Goodstein iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, 8550cd1ad2dSMordechay Goodstein iwl_txq_gen2_get_num_tbs(trans, tfd)); 8560cd1ad2dSMordechay Goodstein 8570cd1ad2dSMordechay Goodstein /* start timer if queue currently empty */ 8580cd1ad2dSMordechay Goodstein if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 8590cd1ad2dSMordechay Goodstein mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 8600cd1ad2dSMordechay Goodstein 8610cd1ad2dSMordechay Goodstein /* Tell device the write index *just past* this latest filled TFD */ 8620cd1ad2dSMordechay Goodstein txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); 8630cd1ad2dSMordechay Goodstein iwl_txq_inc_wr_ptr(trans, txq); 8640cd1ad2dSMordechay Goodstein /* 
8650cd1ad2dSMordechay Goodstein * At this point the frame is "transmitted" successfully 8660cd1ad2dSMordechay Goodstein * and we will get a TX status notification eventually. 8670cd1ad2dSMordechay Goodstein */ 8680cd1ad2dSMordechay Goodstein spin_unlock(&txq->lock); 8690cd1ad2dSMordechay Goodstein return 0; 8700cd1ad2dSMordechay Goodstein } 8710cd1ad2dSMordechay Goodstein 8720cd1ad2dSMordechay Goodstein /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 8730cd1ad2dSMordechay Goodstein 8740cd1ad2dSMordechay Goodstein /* 8750cd1ad2dSMordechay Goodstein * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's 8760cd1ad2dSMordechay Goodstein */ 8770cd1ad2dSMordechay Goodstein void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) 8780cd1ad2dSMordechay Goodstein { 8790cd1ad2dSMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[txq_id]; 8800cd1ad2dSMordechay Goodstein 8810cd1ad2dSMordechay Goodstein spin_lock_bh(&txq->lock); 8820cd1ad2dSMordechay Goodstein while (txq->write_ptr != txq->read_ptr) { 8830cd1ad2dSMordechay Goodstein IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 8840cd1ad2dSMordechay Goodstein txq_id, txq->read_ptr); 8850cd1ad2dSMordechay Goodstein 8860cd1ad2dSMordechay Goodstein if (txq_id != trans->txqs.cmd.q_id) { 8870cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); 8880cd1ad2dSMordechay Goodstein struct sk_buff *skb = txq->entries[idx].skb; 8890cd1ad2dSMordechay Goodstein 8900cd1ad2dSMordechay Goodstein if (WARN_ON_ONCE(!skb)) 8910cd1ad2dSMordechay Goodstein continue; 8920cd1ad2dSMordechay Goodstein 8930cd1ad2dSMordechay Goodstein iwl_txq_free_tso_page(trans, skb); 8940cd1ad2dSMordechay Goodstein } 8950cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_tfd(trans, txq); 8960cd1ad2dSMordechay Goodstein txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); 8970cd1ad2dSMordechay Goodstein } 8980cd1ad2dSMordechay Goodstein 8990cd1ad2dSMordechay Goodstein while (!skb_queue_empty(&txq->overflow_q)) { 
9000cd1ad2dSMordechay Goodstein struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 9010cd1ad2dSMordechay Goodstein 9020cd1ad2dSMordechay Goodstein iwl_op_mode_free_skb(trans->op_mode, skb); 9030cd1ad2dSMordechay Goodstein } 9040cd1ad2dSMordechay Goodstein 9050cd1ad2dSMordechay Goodstein spin_unlock_bh(&txq->lock); 9060cd1ad2dSMordechay Goodstein 9070cd1ad2dSMordechay Goodstein /* just in case - this queue may have been stopped */ 9080cd1ad2dSMordechay Goodstein iwl_wake_queue(trans, txq); 9090cd1ad2dSMordechay Goodstein } 9100cd1ad2dSMordechay Goodstein 9110cd1ad2dSMordechay Goodstein static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, 9120cd1ad2dSMordechay Goodstein struct iwl_txq *txq) 9130cd1ad2dSMordechay Goodstein { 9140cd1ad2dSMordechay Goodstein struct device *dev = trans->dev; 9150cd1ad2dSMordechay Goodstein 9160cd1ad2dSMordechay Goodstein /* De-alloc circular buffer of TFDs */ 9170cd1ad2dSMordechay Goodstein if (txq->tfds) { 9180cd1ad2dSMordechay Goodstein dma_free_coherent(dev, 9190cd1ad2dSMordechay Goodstein trans->txqs.tfd.size * txq->n_window, 9200cd1ad2dSMordechay Goodstein txq->tfds, txq->dma_addr); 9210cd1ad2dSMordechay Goodstein dma_free_coherent(dev, 9220cd1ad2dSMordechay Goodstein sizeof(*txq->first_tb_bufs) * txq->n_window, 9230cd1ad2dSMordechay Goodstein txq->first_tb_bufs, txq->first_tb_dma); 9240cd1ad2dSMordechay Goodstein } 9250cd1ad2dSMordechay Goodstein 9260cd1ad2dSMordechay Goodstein kfree(txq->entries); 9270cd1ad2dSMordechay Goodstein if (txq->bc_tbl.addr) 9280cd1ad2dSMordechay Goodstein dma_pool_free(trans->txqs.bc_pool, 9290cd1ad2dSMordechay Goodstein txq->bc_tbl.addr, txq->bc_tbl.dma); 9300cd1ad2dSMordechay Goodstein kfree(txq); 9310cd1ad2dSMordechay Goodstein } 9320cd1ad2dSMordechay Goodstein 9330cd1ad2dSMordechay Goodstein /* 9340cd1ad2dSMordechay Goodstein * iwl_pcie_txq_free - Deallocate DMA queue. 9350cd1ad2dSMordechay Goodstein * @txq: Transmit queue to deallocate. 
9360cd1ad2dSMordechay Goodstein * 9370cd1ad2dSMordechay Goodstein * Empty queue by removing and destroying all BD's. 9380cd1ad2dSMordechay Goodstein * Free all buffers. 9390cd1ad2dSMordechay Goodstein * 0-fill, but do not free "txq" descriptor structure. 9400cd1ad2dSMordechay Goodstein */ 9410cd1ad2dSMordechay Goodstein static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) 9420cd1ad2dSMordechay Goodstein { 9430cd1ad2dSMordechay Goodstein struct iwl_txq *txq; 9440cd1ad2dSMordechay Goodstein int i; 9450cd1ad2dSMordechay Goodstein 9460cd1ad2dSMordechay Goodstein if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, 9470cd1ad2dSMordechay Goodstein "queue %d out of range", txq_id)) 9480cd1ad2dSMordechay Goodstein return; 9490cd1ad2dSMordechay Goodstein 9500cd1ad2dSMordechay Goodstein txq = trans->txqs.txq[txq_id]; 9510cd1ad2dSMordechay Goodstein 9520cd1ad2dSMordechay Goodstein if (WARN_ON(!txq)) 9530cd1ad2dSMordechay Goodstein return; 9540cd1ad2dSMordechay Goodstein 9550cd1ad2dSMordechay Goodstein iwl_txq_gen2_unmap(trans, txq_id); 9560cd1ad2dSMordechay Goodstein 9570cd1ad2dSMordechay Goodstein /* De-alloc array of command/tx buffers */ 9580cd1ad2dSMordechay Goodstein if (txq_id == trans->txqs.cmd.q_id) 9590cd1ad2dSMordechay Goodstein for (i = 0; i < txq->n_window; i++) { 9600cd1ad2dSMordechay Goodstein kfree_sensitive(txq->entries[i].cmd); 9610cd1ad2dSMordechay Goodstein kfree_sensitive(txq->entries[i].free_buf); 9620cd1ad2dSMordechay Goodstein } 9630cd1ad2dSMordechay Goodstein del_timer_sync(&txq->stuck_timer); 9640cd1ad2dSMordechay Goodstein 9650cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 9660cd1ad2dSMordechay Goodstein 9670cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id] = NULL; 9680cd1ad2dSMordechay Goodstein 9690cd1ad2dSMordechay Goodstein clear_bit(txq_id, trans->txqs.queue_used); 9700cd1ad2dSMordechay Goodstein } 9710cd1ad2dSMordechay Goodstein 9720cd1ad2dSMordechay Goodstein /* 9730cd1ad2dSMordechay Goodstein * iwl_queue_init - 
Initialize queue's high/low-water and read/write indexes 9740cd1ad2dSMordechay Goodstein */ 9750cd1ad2dSMordechay Goodstein static int iwl_queue_init(struct iwl_txq *q, int slots_num) 9760cd1ad2dSMordechay Goodstein { 9770cd1ad2dSMordechay Goodstein q->n_window = slots_num; 9780cd1ad2dSMordechay Goodstein 9790cd1ad2dSMordechay Goodstein /* slots_num must be power-of-two size, otherwise 9800cd1ad2dSMordechay Goodstein * iwl_txq_get_cmd_index is broken. */ 9810cd1ad2dSMordechay Goodstein if (WARN_ON(!is_power_of_2(slots_num))) 9820cd1ad2dSMordechay Goodstein return -EINVAL; 9830cd1ad2dSMordechay Goodstein 9840cd1ad2dSMordechay Goodstein q->low_mark = q->n_window / 4; 9850cd1ad2dSMordechay Goodstein if (q->low_mark < 4) 9860cd1ad2dSMordechay Goodstein q->low_mark = 4; 9870cd1ad2dSMordechay Goodstein 9880cd1ad2dSMordechay Goodstein q->high_mark = q->n_window / 8; 9890cd1ad2dSMordechay Goodstein if (q->high_mark < 2) 9900cd1ad2dSMordechay Goodstein q->high_mark = 2; 9910cd1ad2dSMordechay Goodstein 9920cd1ad2dSMordechay Goodstein q->write_ptr = 0; 9930cd1ad2dSMordechay Goodstein q->read_ptr = 0; 9940cd1ad2dSMordechay Goodstein 9950cd1ad2dSMordechay Goodstein return 0; 9960cd1ad2dSMordechay Goodstein } 9970cd1ad2dSMordechay Goodstein 9980cd1ad2dSMordechay Goodstein int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, 9990cd1ad2dSMordechay Goodstein bool cmd_queue) 10000cd1ad2dSMordechay Goodstein { 10010cd1ad2dSMordechay Goodstein int ret; 10020cd1ad2dSMordechay Goodstein u32 tfd_queue_max_size = 10030cd1ad2dSMordechay Goodstein trans->trans_cfg->base_params->max_tfd_queue_size; 10040cd1ad2dSMordechay Goodstein 10050cd1ad2dSMordechay Goodstein txq->need_update = false; 10060cd1ad2dSMordechay Goodstein 10070cd1ad2dSMordechay Goodstein /* max_tfd_queue_size must be power-of-two size, otherwise 10080cd1ad2dSMordechay Goodstein * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. 
*/ 10090cd1ad2dSMordechay Goodstein if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), 10100cd1ad2dSMordechay Goodstein "Max tfd queue size must be a power of two, but is %d", 10110cd1ad2dSMordechay Goodstein tfd_queue_max_size)) 10120cd1ad2dSMordechay Goodstein return -EINVAL; 10130cd1ad2dSMordechay Goodstein 10140cd1ad2dSMordechay Goodstein /* Initialize queue's high/low-water marks, and head/tail indexes */ 10150cd1ad2dSMordechay Goodstein ret = iwl_queue_init(txq, slots_num); 10160cd1ad2dSMordechay Goodstein if (ret) 10170cd1ad2dSMordechay Goodstein return ret; 10180cd1ad2dSMordechay Goodstein 10190cd1ad2dSMordechay Goodstein spin_lock_init(&txq->lock); 10200cd1ad2dSMordechay Goodstein 10210cd1ad2dSMordechay Goodstein if (cmd_queue) { 10220cd1ad2dSMordechay Goodstein static struct lock_class_key iwl_txq_cmd_queue_lock_class; 10230cd1ad2dSMordechay Goodstein 10240cd1ad2dSMordechay Goodstein lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); 10250cd1ad2dSMordechay Goodstein } 10260cd1ad2dSMordechay Goodstein 10270cd1ad2dSMordechay Goodstein __skb_queue_head_init(&txq->overflow_q); 10280cd1ad2dSMordechay Goodstein 10290cd1ad2dSMordechay Goodstein return 0; 10300cd1ad2dSMordechay Goodstein } 10310cd1ad2dSMordechay Goodstein 10320cd1ad2dSMordechay Goodstein void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) 10330cd1ad2dSMordechay Goodstein { 10340cd1ad2dSMordechay Goodstein struct page **page_ptr; 10350cd1ad2dSMordechay Goodstein struct page *next; 10360cd1ad2dSMordechay Goodstein 10370cd1ad2dSMordechay Goodstein page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 10380cd1ad2dSMordechay Goodstein next = *page_ptr; 10390cd1ad2dSMordechay Goodstein *page_ptr = NULL; 10400cd1ad2dSMordechay Goodstein 10410cd1ad2dSMordechay Goodstein while (next) { 10420cd1ad2dSMordechay Goodstein struct page *tmp = next; 10430cd1ad2dSMordechay Goodstein 10440cd1ad2dSMordechay Goodstein next = *(void **)(page_address(next) + PAGE_SIZE - 
10450cd1ad2dSMordechay Goodstein sizeof(void *)); 10460cd1ad2dSMordechay Goodstein __free_page(tmp); 10470cd1ad2dSMordechay Goodstein } 10480cd1ad2dSMordechay Goodstein } 10490cd1ad2dSMordechay Goodstein 10500cd1ad2dSMordechay Goodstein void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) 10510cd1ad2dSMordechay Goodstein { 10520cd1ad2dSMordechay Goodstein u32 txq_id = txq->id; 10530cd1ad2dSMordechay Goodstein u32 status; 10540cd1ad2dSMordechay Goodstein bool active; 10550cd1ad2dSMordechay Goodstein u8 fifo; 10560cd1ad2dSMordechay Goodstein 10570cd1ad2dSMordechay Goodstein if (trans->trans_cfg->use_tfh) { 10580cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, 10590cd1ad2dSMordechay Goodstein txq->read_ptr, txq->write_ptr); 10600cd1ad2dSMordechay Goodstein /* TODO: access new SCD registers and dump them */ 10610cd1ad2dSMordechay Goodstein return; 10620cd1ad2dSMordechay Goodstein } 10630cd1ad2dSMordechay Goodstein 10640cd1ad2dSMordechay Goodstein status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); 10650cd1ad2dSMordechay Goodstein fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 10660cd1ad2dSMordechay Goodstein active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 10670cd1ad2dSMordechay Goodstein 10680cd1ad2dSMordechay Goodstein IWL_ERR(trans, 10690cd1ad2dSMordechay Goodstein "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", 10700cd1ad2dSMordechay Goodstein txq_id, active ? 
"" : "in", fifo, 10710cd1ad2dSMordechay Goodstein jiffies_to_msecs(txq->wd_timeout), 10720cd1ad2dSMordechay Goodstein txq->read_ptr, txq->write_ptr, 10730cd1ad2dSMordechay Goodstein iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 10740cd1ad2dSMordechay Goodstein (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 10750cd1ad2dSMordechay Goodstein iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 10760cd1ad2dSMordechay Goodstein (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 10770cd1ad2dSMordechay Goodstein iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 10780cd1ad2dSMordechay Goodstein } 10790cd1ad2dSMordechay Goodstein 10800cd1ad2dSMordechay Goodstein static void iwl_txq_stuck_timer(struct timer_list *t) 10810cd1ad2dSMordechay Goodstein { 10820cd1ad2dSMordechay Goodstein struct iwl_txq *txq = from_timer(txq, t, stuck_timer); 10830cd1ad2dSMordechay Goodstein struct iwl_trans *trans = txq->trans; 10840cd1ad2dSMordechay Goodstein 10850cd1ad2dSMordechay Goodstein spin_lock(&txq->lock); 10860cd1ad2dSMordechay Goodstein /* check if triggered erroneously */ 10870cd1ad2dSMordechay Goodstein if (txq->read_ptr == txq->write_ptr) { 10880cd1ad2dSMordechay Goodstein spin_unlock(&txq->lock); 10890cd1ad2dSMordechay Goodstein return; 10900cd1ad2dSMordechay Goodstein } 10910cd1ad2dSMordechay Goodstein spin_unlock(&txq->lock); 10920cd1ad2dSMordechay Goodstein 10930cd1ad2dSMordechay Goodstein iwl_txq_log_scd_error(trans, txq); 10940cd1ad2dSMordechay Goodstein 10950cd1ad2dSMordechay Goodstein iwl_force_nmi(trans); 10960cd1ad2dSMordechay Goodstein } 10970cd1ad2dSMordechay Goodstein 10980cd1ad2dSMordechay Goodstein int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, 10990cd1ad2dSMordechay Goodstein bool cmd_queue) 11000cd1ad2dSMordechay Goodstein { 11010cd1ad2dSMordechay Goodstein size_t tfd_sz = trans->txqs.tfd.size * 11020cd1ad2dSMordechay Goodstein trans->trans_cfg->base_params->max_tfd_queue_size; 11030cd1ad2dSMordechay Goodstein size_t 
tb0_buf_sz; 11040cd1ad2dSMordechay Goodstein int i; 11050cd1ad2dSMordechay Goodstein 11060cd1ad2dSMordechay Goodstein if (WARN_ON(txq->entries || txq->tfds)) 11070cd1ad2dSMordechay Goodstein return -EINVAL; 11080cd1ad2dSMordechay Goodstein 11090cd1ad2dSMordechay Goodstein if (trans->trans_cfg->use_tfh) 11100cd1ad2dSMordechay Goodstein tfd_sz = trans->txqs.tfd.size * slots_num; 11110cd1ad2dSMordechay Goodstein 11120cd1ad2dSMordechay Goodstein timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); 11130cd1ad2dSMordechay Goodstein txq->trans = trans; 11140cd1ad2dSMordechay Goodstein 11150cd1ad2dSMordechay Goodstein txq->n_window = slots_num; 11160cd1ad2dSMordechay Goodstein 11170cd1ad2dSMordechay Goodstein txq->entries = kcalloc(slots_num, 11180cd1ad2dSMordechay Goodstein sizeof(struct iwl_pcie_txq_entry), 11190cd1ad2dSMordechay Goodstein GFP_KERNEL); 11200cd1ad2dSMordechay Goodstein 11210cd1ad2dSMordechay Goodstein if (!txq->entries) 11220cd1ad2dSMordechay Goodstein goto error; 11230cd1ad2dSMordechay Goodstein 11240cd1ad2dSMordechay Goodstein if (cmd_queue) 11250cd1ad2dSMordechay Goodstein for (i = 0; i < slots_num; i++) { 11260cd1ad2dSMordechay Goodstein txq->entries[i].cmd = 11270cd1ad2dSMordechay Goodstein kmalloc(sizeof(struct iwl_device_cmd), 11280cd1ad2dSMordechay Goodstein GFP_KERNEL); 11290cd1ad2dSMordechay Goodstein if (!txq->entries[i].cmd) 11300cd1ad2dSMordechay Goodstein goto error; 11310cd1ad2dSMordechay Goodstein } 11320cd1ad2dSMordechay Goodstein 11330cd1ad2dSMordechay Goodstein /* Circular buffer of transmit frame descriptors (TFDs), 11340cd1ad2dSMordechay Goodstein * shared with device */ 11350cd1ad2dSMordechay Goodstein txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 11360cd1ad2dSMordechay Goodstein &txq->dma_addr, GFP_KERNEL); 11370cd1ad2dSMordechay Goodstein if (!txq->tfds) 11380cd1ad2dSMordechay Goodstein goto error; 11390cd1ad2dSMordechay Goodstein 11400cd1ad2dSMordechay Goodstein BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != 
IWL_FIRST_TB_SIZE_ALIGN); 11410cd1ad2dSMordechay Goodstein 11420cd1ad2dSMordechay Goodstein tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 11430cd1ad2dSMordechay Goodstein 11440cd1ad2dSMordechay Goodstein txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 11450cd1ad2dSMordechay Goodstein &txq->first_tb_dma, 11460cd1ad2dSMordechay Goodstein GFP_KERNEL); 11470cd1ad2dSMordechay Goodstein if (!txq->first_tb_bufs) 11480cd1ad2dSMordechay Goodstein goto err_free_tfds; 11490cd1ad2dSMordechay Goodstein 11500cd1ad2dSMordechay Goodstein return 0; 11510cd1ad2dSMordechay Goodstein err_free_tfds: 11520cd1ad2dSMordechay Goodstein dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 11530cd1ad2dSMordechay Goodstein error: 11540cd1ad2dSMordechay Goodstein if (txq->entries && cmd_queue) 11550cd1ad2dSMordechay Goodstein for (i = 0; i < slots_num; i++) 11560cd1ad2dSMordechay Goodstein kfree(txq->entries[i].cmd); 11570cd1ad2dSMordechay Goodstein kfree(txq->entries); 11580cd1ad2dSMordechay Goodstein txq->entries = NULL; 11590cd1ad2dSMordechay Goodstein 11600cd1ad2dSMordechay Goodstein return -ENOMEM; 11610cd1ad2dSMordechay Goodstein } 11620cd1ad2dSMordechay Goodstein 11630cd1ad2dSMordechay Goodstein static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, 11640cd1ad2dSMordechay Goodstein struct iwl_txq **intxq, int size, 11650cd1ad2dSMordechay Goodstein unsigned int timeout) 11660cd1ad2dSMordechay Goodstein { 11670cd1ad2dSMordechay Goodstein size_t bc_tbl_size, bc_tbl_entries; 11680cd1ad2dSMordechay Goodstein struct iwl_txq *txq; 11690cd1ad2dSMordechay Goodstein int ret; 11700cd1ad2dSMordechay Goodstein 11710cd1ad2dSMordechay Goodstein WARN_ON(!trans->txqs.bc_tbl_size); 11720cd1ad2dSMordechay Goodstein 11730cd1ad2dSMordechay Goodstein bc_tbl_size = trans->txqs.bc_tbl_size; 11740cd1ad2dSMordechay Goodstein bc_tbl_entries = bc_tbl_size / sizeof(u16); 11750cd1ad2dSMordechay Goodstein 11760cd1ad2dSMordechay Goodstein if (WARN_ON(size > bc_tbl_entries)) 
11770cd1ad2dSMordechay Goodstein return -EINVAL; 11780cd1ad2dSMordechay Goodstein 11790cd1ad2dSMordechay Goodstein txq = kzalloc(sizeof(*txq), GFP_KERNEL); 11800cd1ad2dSMordechay Goodstein if (!txq) 11810cd1ad2dSMordechay Goodstein return -ENOMEM; 11820cd1ad2dSMordechay Goodstein 11830cd1ad2dSMordechay Goodstein txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, 11840cd1ad2dSMordechay Goodstein &txq->bc_tbl.dma); 11850cd1ad2dSMordechay Goodstein if (!txq->bc_tbl.addr) { 11860cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 11870cd1ad2dSMordechay Goodstein kfree(txq); 11880cd1ad2dSMordechay Goodstein return -ENOMEM; 11890cd1ad2dSMordechay Goodstein } 11900cd1ad2dSMordechay Goodstein 11910cd1ad2dSMordechay Goodstein ret = iwl_txq_alloc(trans, txq, size, false); 11920cd1ad2dSMordechay Goodstein if (ret) { 11930cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx queue alloc failed\n"); 11940cd1ad2dSMordechay Goodstein goto error; 11950cd1ad2dSMordechay Goodstein } 11960cd1ad2dSMordechay Goodstein ret = iwl_txq_init(trans, txq, size, false); 11970cd1ad2dSMordechay Goodstein if (ret) { 11980cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx queue init failed\n"); 11990cd1ad2dSMordechay Goodstein goto error; 12000cd1ad2dSMordechay Goodstein } 12010cd1ad2dSMordechay Goodstein 12020cd1ad2dSMordechay Goodstein txq->wd_timeout = msecs_to_jiffies(timeout); 12030cd1ad2dSMordechay Goodstein 12040cd1ad2dSMordechay Goodstein *intxq = txq; 12050cd1ad2dSMordechay Goodstein return 0; 12060cd1ad2dSMordechay Goodstein 12070cd1ad2dSMordechay Goodstein error: 12080cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 12090cd1ad2dSMordechay Goodstein return ret; 12100cd1ad2dSMordechay Goodstein } 12110cd1ad2dSMordechay Goodstein 12120cd1ad2dSMordechay Goodstein static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, 12130cd1ad2dSMordechay Goodstein struct iwl_host_cmd *hcmd) 12140cd1ad2dSMordechay Goodstein { 
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	/* the response must be exactly one queue-config response struct */
	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	/* the firmware assigned the queue id - validate it fits our array */
	if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans->txqs.txq[qid] = txq;
	/* wrap the firmware write pointer into the TFD ring */
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	/* the response packet is consumed on success and failure alike */
	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

/*
 * iwl_txq_dyn_alloc - dynamically allocate a TX queue via firmware command.
 *
 * Allocates the host-side queue resources, sends the queue-config command
 * (cmd_id) with the ring/byte-count DMA addresses, and activates the queue
 * under the id the firmware returns. Returns the queue id (>= 0) on
 * success or a negative errno; on failure the queue memory is freed.
 */
int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size, unsigned int timeout)
{
	struct iwl_txq *txq = NULL;
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = flags,
		.sta_id = sta_id,
		.tid = tid,
	};
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(cmd) },
		.data = { &cmd, },
		.flags = CMD_WANT_SKB, /* we need the response packet */
	};
	int ret;

	ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
	if (ret)
		return ret;

	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	/* ownership of txq and of the response passes to the helper */
	return iwl_txq_alloc_response(trans, txq, &hcmd);

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

/*
 * iwl_txq_dyn_free - free a dynamically allocated (TVQM) TX queue.
 */
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
{
	if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
		 "queue %d out of range", queue))
		return;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
13070cd1ad2dSMordechay Goodstein */ 13080cd1ad2dSMordechay Goodstein if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { 13090cd1ad2dSMordechay Goodstein WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 13100cd1ad2dSMordechay Goodstein "queue %d not used", queue); 13110cd1ad2dSMordechay Goodstein return; 13120cd1ad2dSMordechay Goodstein } 13130cd1ad2dSMordechay Goodstein 13140cd1ad2dSMordechay Goodstein iwl_txq_gen2_unmap(trans, queue); 13150cd1ad2dSMordechay Goodstein 13160cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]); 13170cd1ad2dSMordechay Goodstein 13180cd1ad2dSMordechay Goodstein trans->txqs.txq[queue] = NULL; 13190cd1ad2dSMordechay Goodstein 13200cd1ad2dSMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); 13210cd1ad2dSMordechay Goodstein } 13220cd1ad2dSMordechay Goodstein 13230cd1ad2dSMordechay Goodstein void iwl_txq_gen2_tx_free(struct iwl_trans *trans) 13240cd1ad2dSMordechay Goodstein { 13250cd1ad2dSMordechay Goodstein int i; 13260cd1ad2dSMordechay Goodstein 13270cd1ad2dSMordechay Goodstein memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 13280cd1ad2dSMordechay Goodstein 13290cd1ad2dSMordechay Goodstein /* Free all TX queues */ 13300cd1ad2dSMordechay Goodstein for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { 13310cd1ad2dSMordechay Goodstein if (!trans->txqs.txq[i]) 13320cd1ad2dSMordechay Goodstein continue; 13330cd1ad2dSMordechay Goodstein 13340cd1ad2dSMordechay Goodstein iwl_txq_gen2_free(trans, i); 13350cd1ad2dSMordechay Goodstein } 13360cd1ad2dSMordechay Goodstein } 13370cd1ad2dSMordechay Goodstein 13380cd1ad2dSMordechay Goodstein int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) 13390cd1ad2dSMordechay Goodstein { 13400cd1ad2dSMordechay Goodstein struct iwl_txq *queue; 13410cd1ad2dSMordechay Goodstein int ret; 13420cd1ad2dSMordechay Goodstein 13430cd1ad2dSMordechay Goodstein /* alloc and init the tx queue */ 
13440cd1ad2dSMordechay Goodstein if (!trans->txqs.txq[txq_id]) { 13450cd1ad2dSMordechay Goodstein queue = kzalloc(sizeof(*queue), GFP_KERNEL); 13460cd1ad2dSMordechay Goodstein if (!queue) { 13470cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Not enough memory for tx queue\n"); 13480cd1ad2dSMordechay Goodstein return -ENOMEM; 13490cd1ad2dSMordechay Goodstein } 13500cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id] = queue; 13510cd1ad2dSMordechay Goodstein ret = iwl_txq_alloc(trans, queue, queue_size, true); 13520cd1ad2dSMordechay Goodstein if (ret) { 13530cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 13540cd1ad2dSMordechay Goodstein goto error; 13550cd1ad2dSMordechay Goodstein } 13560cd1ad2dSMordechay Goodstein } else { 13570cd1ad2dSMordechay Goodstein queue = trans->txqs.txq[txq_id]; 13580cd1ad2dSMordechay Goodstein } 13590cd1ad2dSMordechay Goodstein 13600cd1ad2dSMordechay Goodstein ret = iwl_txq_init(trans, queue, queue_size, 13610cd1ad2dSMordechay Goodstein (txq_id == trans->txqs.cmd.q_id)); 13620cd1ad2dSMordechay Goodstein if (ret) { 13630cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 13640cd1ad2dSMordechay Goodstein goto error; 13650cd1ad2dSMordechay Goodstein } 13660cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id]->id = txq_id; 13670cd1ad2dSMordechay Goodstein set_bit(txq_id, trans->txqs.queue_used); 13680cd1ad2dSMordechay Goodstein 13690cd1ad2dSMordechay Goodstein return 0; 13700cd1ad2dSMordechay Goodstein 13710cd1ad2dSMordechay Goodstein error: 13720cd1ad2dSMordechay Goodstein iwl_txq_gen2_tx_free(trans); 13730cd1ad2dSMordechay Goodstein return ret; 13740cd1ad2dSMordechay Goodstein } 13750cd1ad2dSMordechay Goodstein 13760179bfffSMordechay Goodstein static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, 13770179bfffSMordechay Goodstein void *_tfd, u8 idx) 13780179bfffSMordechay Goodstein { 13790179bfffSMordechay Goodstein struct iwl_tfd *tfd; 13800179bfffSMordechay 
	struct iwl_tfd_tb *tb;
	dma_addr_t addr;
	dma_addr_t hi_len;

	if (trans->trans_cfg->use_tfh) {
		/* TFH (gen2) TFDs carry a full 64-bit little-endian address */
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	}

	/* legacy TFD: 32 low bits in tb->lo, 4 high bits in hi_n_len */
	tfd = _tfd;
	tb = &tfd->tbs[idx];
	addr = get_unaligned_le32(&tb->lo);

	/* on 32-bit dma_addr_t the high bits cannot be represented anyway */
	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}

/*
 * iwl_txq_gen1_tfd_unmap - unmap all DMA buffers attached to one TFD.
 * @meta: per-entry metadata; meta->tbs marks which TBs were mapped as
 *        pages (vs. single mappings)
 * @txq: the queue the TFD belongs to
 * @index: index of the TFD within the queue
 *
 * TB 0 is skipped: it is the bidirectional first-TB buffer owned by the
 * queue itself. After unmapping, the TFD's TB count is zeroed so the
 * device sees an empty descriptor.
 */
void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index)
{
	int i, num_tbs;
	void *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		/* unmap with the same API that created the mapping */
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	/* zero the TB count in whichever descriptor layout is in use */
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}
14550179bfffSMordechay Goodstein 14560179bfffSMordechay Goodstein #define IWL_TX_CRC_SIZE 4 14570179bfffSMordechay Goodstein #define IWL_TX_DELIMITER_SIZE 4 14580179bfffSMordechay Goodstein 14590179bfffSMordechay Goodstein /* 14600179bfffSMordechay Goodstein * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array 14610179bfffSMordechay Goodstein */ 14620179bfffSMordechay Goodstein void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, 14630179bfffSMordechay Goodstein struct iwl_txq *txq, u16 byte_cnt, 14640179bfffSMordechay Goodstein int num_tbs) 14650179bfffSMordechay Goodstein { 14660179bfffSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl; 14670179bfffSMordechay Goodstein int write_ptr = txq->write_ptr; 14680179bfffSMordechay Goodstein int txq_id = txq->id; 14690179bfffSMordechay Goodstein u8 sec_ctl = 0; 14700179bfffSMordechay Goodstein u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 14710179bfffSMordechay Goodstein __le16 bc_ent; 14720179bfffSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; 14730179bfffSMordechay Goodstein struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 14740179bfffSMordechay Goodstein u8 sta_id = tx_cmd->sta_id; 14750179bfffSMordechay Goodstein 14760179bfffSMordechay Goodstein scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 14770179bfffSMordechay Goodstein 14780179bfffSMordechay Goodstein sec_ctl = tx_cmd->sec_ctl; 14790179bfffSMordechay Goodstein 14800179bfffSMordechay Goodstein switch (sec_ctl & TX_CMD_SEC_MSK) { 14810179bfffSMordechay Goodstein case TX_CMD_SEC_CCM: 14820179bfffSMordechay Goodstein len += IEEE80211_CCMP_MIC_LEN; 14830179bfffSMordechay Goodstein break; 14840179bfffSMordechay Goodstein case TX_CMD_SEC_TKIP: 14850179bfffSMordechay Goodstein len += IEEE80211_TKIP_ICV_LEN; 14860179bfffSMordechay Goodstein break; 14870179bfffSMordechay Goodstein case TX_CMD_SEC_WEP: 14880179bfffSMordechay Goodstein len += IEEE80211_WEP_IV_LEN + 
IEEE80211_WEP_ICV_LEN; 14890179bfffSMordechay Goodstein break; 14900179bfffSMordechay Goodstein } 14910179bfffSMordechay Goodstein if (trans->txqs.bc_table_dword) 14920179bfffSMordechay Goodstein len = DIV_ROUND_UP(len, 4); 14930179bfffSMordechay Goodstein 14940179bfffSMordechay Goodstein if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) 14950179bfffSMordechay Goodstein return; 14960179bfffSMordechay Goodstein 14970179bfffSMordechay Goodstein bc_ent = cpu_to_le16(len | (sta_id << 12)); 14980179bfffSMordechay Goodstein 14990179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 15000179bfffSMordechay Goodstein 15010179bfffSMordechay Goodstein if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 15020179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = 15030179bfffSMordechay Goodstein bc_ent; 15040179bfffSMordechay Goodstein } 15050179bfffSMordechay Goodstein 15060179bfffSMordechay Goodstein void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, 15070179bfffSMordechay Goodstein struct iwl_txq *txq) 15080179bfffSMordechay Goodstein { 15090179bfffSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 15100179bfffSMordechay Goodstein int txq_id = txq->id; 15110179bfffSMordechay Goodstein int read_ptr = txq->read_ptr; 15120179bfffSMordechay Goodstein u8 sta_id = 0; 15130179bfffSMordechay Goodstein __le16 bc_ent; 15140179bfffSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; 15150179bfffSMordechay Goodstein struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 15160179bfffSMordechay Goodstein 15170179bfffSMordechay Goodstein WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 15180179bfffSMordechay Goodstein 15190179bfffSMordechay Goodstein if (txq_id != trans->txqs.cmd.q_id) 15200179bfffSMordechay Goodstein sta_id = tx_cmd->sta_id; 15210179bfffSMordechay Goodstein 15220179bfffSMordechay Goodstein bc_ent = cpu_to_le16(1 | (sta_id << 12)); 
15230179bfffSMordechay Goodstein 15240179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 15250179bfffSMordechay Goodstein 15260179bfffSMordechay Goodstein if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 15270179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = 15280179bfffSMordechay Goodstein bc_ent; 15290179bfffSMordechay Goodstein } 1530