18e99ea8dSJohannes Berg // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 28e99ea8dSJohannes Berg /* 32e0ce1deSAnjaneyulu * Copyright (C) 2020-2023 Intel Corporation 48e99ea8dSJohannes Berg */ 50cd1ad2dSMordechay Goodstein #include <net/tso.h> 60cd1ad2dSMordechay Goodstein #include <linux/tcp.h> 70cd1ad2dSMordechay Goodstein 80cd1ad2dSMordechay Goodstein #include "iwl-debug.h" 90cd1ad2dSMordechay Goodstein #include "iwl-io.h" 1085b17a33SJohannes Berg #include "fw/api/commands.h" 110cd1ad2dSMordechay Goodstein #include "fw/api/tx.h" 12227f2597SJohannes Berg #include "fw/api/datapath.h" 13c83031afSJohannes Berg #include "fw/api/debug.h" 140cd1ad2dSMordechay Goodstein #include "queue/tx.h" 150cd1ad2dSMordechay Goodstein #include "iwl-fh.h" 160cd1ad2dSMordechay Goodstein #include "iwl-scd.h" 170cd1ad2dSMordechay Goodstein #include <linux/dmapool.h> 180cd1ad2dSMordechay Goodstein 190cd1ad2dSMordechay Goodstein /* 200cd1ad2dSMordechay Goodstein * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array 210cd1ad2dSMordechay Goodstein */ 220cd1ad2dSMordechay Goodstein static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, 230cd1ad2dSMordechay Goodstein struct iwl_txq *txq, u16 byte_cnt, 240cd1ad2dSMordechay Goodstein int num_tbs) 250cd1ad2dSMordechay Goodstein { 260cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 270cd1ad2dSMordechay Goodstein u8 filled_tfd_size, num_fetch_chunks; 280cd1ad2dSMordechay Goodstein u16 len = byte_cnt; 290cd1ad2dSMordechay Goodstein __le16 bc_ent; 300cd1ad2dSMordechay Goodstein 310cd1ad2dSMordechay Goodstein if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) 320cd1ad2dSMordechay Goodstein return; 330cd1ad2dSMordechay Goodstein 340cd1ad2dSMordechay Goodstein filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + 350cd1ad2dSMordechay Goodstein num_tbs * sizeof(struct iwl_tfh_tb); 360cd1ad2dSMordechay Goodstein /* 370cd1ad2dSMordechay Goodstein * filled_tfd_size contains the number 
of filled bytes in the TFD. 380cd1ad2dSMordechay Goodstein * Dividing it by 64 will give the number of chunks to fetch 390cd1ad2dSMordechay Goodstein * to SRAM- 0 for one chunk, 1 for 2 and so on. 400cd1ad2dSMordechay Goodstein * If, for example, TFD contains only 3 TBs then 32 bytes 410cd1ad2dSMordechay Goodstein * of the TFD are used, and only one chunk of 64 bytes should 420cd1ad2dSMordechay Goodstein * be fetched 430cd1ad2dSMordechay Goodstein */ 440cd1ad2dSMordechay Goodstein num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; 450cd1ad2dSMordechay Goodstein 460cd1ad2dSMordechay Goodstein if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 47d5399f11SMordechay Goodstein struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; 480cd1ad2dSMordechay Goodstein 490cd1ad2dSMordechay Goodstein /* Starting from AX210, the HW expects bytes */ 500cd1ad2dSMordechay Goodstein WARN_ON(trans->txqs.bc_table_dword); 510cd1ad2dSMordechay Goodstein WARN_ON(len > 0x3FFF); 520cd1ad2dSMordechay Goodstein bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); 53d5399f11SMordechay Goodstein scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; 540cd1ad2dSMordechay Goodstein } else { 550cd1ad2dSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; 560cd1ad2dSMordechay Goodstein 570cd1ad2dSMordechay Goodstein /* Before AX210, the HW expects DW */ 580cd1ad2dSMordechay Goodstein WARN_ON(!trans->txqs.bc_table_dword); 590cd1ad2dSMordechay Goodstein len = DIV_ROUND_UP(len, 4); 600cd1ad2dSMordechay Goodstein WARN_ON(len > 0xFFF); 610cd1ad2dSMordechay Goodstein bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); 620cd1ad2dSMordechay Goodstein scd_bc_tbl->tfd_offset[idx] = bc_ent; 630cd1ad2dSMordechay Goodstein } 640cd1ad2dSMordechay Goodstein } 650cd1ad2dSMordechay Goodstein 660cd1ad2dSMordechay Goodstein /* 670cd1ad2dSMordechay Goodstein * iwl_txq_inc_wr_ptr - Send new write index to hardware 680cd1ad2dSMordechay Goodstein */ 690cd1ad2dSMordechay 
Goodstein void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) 700cd1ad2dSMordechay Goodstein { 710cd1ad2dSMordechay Goodstein lockdep_assert_held(&txq->lock); 720cd1ad2dSMordechay Goodstein 730cd1ad2dSMordechay Goodstein IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); 740cd1ad2dSMordechay Goodstein 750cd1ad2dSMordechay Goodstein /* 760cd1ad2dSMordechay Goodstein * if not in power-save mode, uCode will never sleep when we're 770cd1ad2dSMordechay Goodstein * trying to tx (during RFKILL, we're not trying to tx). 780cd1ad2dSMordechay Goodstein */ 790cd1ad2dSMordechay Goodstein iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); 800cd1ad2dSMordechay Goodstein } 810cd1ad2dSMordechay Goodstein 820cd1ad2dSMordechay Goodstein static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, 830cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd) 840cd1ad2dSMordechay Goodstein { 850cd1ad2dSMordechay Goodstein return le16_to_cpu(tfd->num_tbs) & 0x1f; 860cd1ad2dSMordechay Goodstein } 870cd1ad2dSMordechay Goodstein 8880fa8377SJohannes Berg int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, 8980fa8377SJohannes Berg dma_addr_t addr, u16 len) 9080fa8377SJohannes Berg { 9180fa8377SJohannes Berg int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); 9280fa8377SJohannes Berg struct iwl_tfh_tb *tb; 9380fa8377SJohannes Berg 9480fa8377SJohannes Berg /* Only WARN here so we know about the issue, but we mess up our 9580fa8377SJohannes Berg * unmap path because not every place currently checks for errors 9680fa8377SJohannes Berg * returned from this function - it can only return an error if 9780fa8377SJohannes Berg * there's no more space, and so when we know there is enough we 9880fa8377SJohannes Berg * don't always check ... 
9980fa8377SJohannes Berg */ 10080fa8377SJohannes Berg WARN(iwl_txq_crosses_4g_boundary(addr, len), 10180fa8377SJohannes Berg "possible DMA problem with iova:0x%llx, len:%d\n", 10280fa8377SJohannes Berg (unsigned long long)addr, len); 10380fa8377SJohannes Berg 10480fa8377SJohannes Berg if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) 10580fa8377SJohannes Berg return -EINVAL; 10680fa8377SJohannes Berg tb = &tfd->tbs[idx]; 10780fa8377SJohannes Berg 10880fa8377SJohannes Berg /* Each TFD can point to a maximum max_tbs Tx buffers */ 10980fa8377SJohannes Berg if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { 11080fa8377SJohannes Berg IWL_ERR(trans, "Error can not send more than %d chunks\n", 11180fa8377SJohannes Berg trans->txqs.tfd.max_tbs); 11280fa8377SJohannes Berg return -EINVAL; 11380fa8377SJohannes Berg } 11480fa8377SJohannes Berg 11580fa8377SJohannes Berg put_unaligned_le64(addr, &tb->addr); 11680fa8377SJohannes Berg tb->tb_len = cpu_to_le16(len); 11780fa8377SJohannes Berg 11880fa8377SJohannes Berg tfd->num_tbs = cpu_to_le16(idx + 1); 11980fa8377SJohannes Berg 12080fa8377SJohannes Berg return idx; 12180fa8377SJohannes Berg } 12280fa8377SJohannes Berg 123c83031afSJohannes Berg static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, 124c83031afSJohannes Berg struct iwl_tfh_tfd *tfd) 125c83031afSJohannes Berg { 126c83031afSJohannes Berg tfd->num_tbs = 0; 127c83031afSJohannes Berg 128c83031afSJohannes Berg iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, 129c83031afSJohannes Berg trans->invalid_tx_cmd.size); 130c83031afSJohannes Berg } 131c83031afSJohannes Berg 1320cd1ad2dSMordechay Goodstein void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, 1330cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd) 1340cd1ad2dSMordechay Goodstein { 1350cd1ad2dSMordechay Goodstein int i, num_tbs; 1360cd1ad2dSMordechay Goodstein 1370cd1ad2dSMordechay Goodstein /* Sanity check on number of chunks */ 1380cd1ad2dSMordechay Goodstein num_tbs = 
iwl_txq_gen2_get_num_tbs(trans, tfd); 1390cd1ad2dSMordechay Goodstein 1400cd1ad2dSMordechay Goodstein if (num_tbs > trans->txqs.tfd.max_tbs) { 1410cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 1420cd1ad2dSMordechay Goodstein return; 1430cd1ad2dSMordechay Goodstein } 1440cd1ad2dSMordechay Goodstein 1450cd1ad2dSMordechay Goodstein /* first TB is never freed - it's the bidirectional DMA data */ 1460cd1ad2dSMordechay Goodstein for (i = 1; i < num_tbs; i++) { 1470cd1ad2dSMordechay Goodstein if (meta->tbs & BIT(i)) 1480cd1ad2dSMordechay Goodstein dma_unmap_page(trans->dev, 1490cd1ad2dSMordechay Goodstein le64_to_cpu(tfd->tbs[i].addr), 1500cd1ad2dSMordechay Goodstein le16_to_cpu(tfd->tbs[i].tb_len), 1510cd1ad2dSMordechay Goodstein DMA_TO_DEVICE); 1520cd1ad2dSMordechay Goodstein else 1530cd1ad2dSMordechay Goodstein dma_unmap_single(trans->dev, 1540cd1ad2dSMordechay Goodstein le64_to_cpu(tfd->tbs[i].addr), 1550cd1ad2dSMordechay Goodstein le16_to_cpu(tfd->tbs[i].tb_len), 1560cd1ad2dSMordechay Goodstein DMA_TO_DEVICE); 1570cd1ad2dSMordechay Goodstein } 1580cd1ad2dSMordechay Goodstein 159c83031afSJohannes Berg iwl_txq_set_tfd_invalid_gen2(trans, tfd); 1600cd1ad2dSMordechay Goodstein } 1610cd1ad2dSMordechay Goodstein 1620cd1ad2dSMordechay Goodstein void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 1630cd1ad2dSMordechay Goodstein { 1640cd1ad2dSMordechay Goodstein /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 1650cd1ad2dSMordechay Goodstein * idx is bounded by n_window 1660cd1ad2dSMordechay Goodstein */ 1670cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); 1680f8d5656SEmmanuel Grumbach struct sk_buff *skb; 1690cd1ad2dSMordechay Goodstein 1700cd1ad2dSMordechay Goodstein lockdep_assert_held(&txq->lock); 1710cd1ad2dSMordechay Goodstein 1720f8d5656SEmmanuel Grumbach if (!txq->entries) 1730f8d5656SEmmanuel Grumbach return; 1740f8d5656SEmmanuel Grumbach 1750cd1ad2dSMordechay Goodstein 
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 1760cd1ad2dSMordechay Goodstein iwl_txq_get_tfd(trans, txq, idx)); 1770cd1ad2dSMordechay Goodstein 1780cd1ad2dSMordechay Goodstein skb = txq->entries[idx].skb; 1790cd1ad2dSMordechay Goodstein 1800cd1ad2dSMordechay Goodstein /* Can be called from irqs-disabled context 1810cd1ad2dSMordechay Goodstein * If skb is not NULL, it means that the whole queue is being 1820cd1ad2dSMordechay Goodstein * freed and that the queue is not empty - free the skb 1830cd1ad2dSMordechay Goodstein */ 1840cd1ad2dSMordechay Goodstein if (skb) { 1850cd1ad2dSMordechay Goodstein iwl_op_mode_free_skb(trans->op_mode, skb); 1860cd1ad2dSMordechay Goodstein txq->entries[idx].skb = NULL; 1870cd1ad2dSMordechay Goodstein } 1880cd1ad2dSMordechay Goodstein } 1890cd1ad2dSMordechay Goodstein 1900cd1ad2dSMordechay Goodstein static struct page *get_workaround_page(struct iwl_trans *trans, 1910cd1ad2dSMordechay Goodstein struct sk_buff *skb) 1920cd1ad2dSMordechay Goodstein { 1930cd1ad2dSMordechay Goodstein struct page **page_ptr; 1940cd1ad2dSMordechay Goodstein struct page *ret; 1950cd1ad2dSMordechay Goodstein 1960cd1ad2dSMordechay Goodstein page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 1970cd1ad2dSMordechay Goodstein 1980cd1ad2dSMordechay Goodstein ret = alloc_page(GFP_ATOMIC); 1990cd1ad2dSMordechay Goodstein if (!ret) 2000cd1ad2dSMordechay Goodstein return NULL; 2010cd1ad2dSMordechay Goodstein 2020cd1ad2dSMordechay Goodstein /* set the chaining pointer to the previous page if there */ 2033827cb59SJohannes Berg *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; 2040cd1ad2dSMordechay Goodstein *page_ptr = ret; 2050cd1ad2dSMordechay Goodstein 2060cd1ad2dSMordechay Goodstein return ret; 2070cd1ad2dSMordechay Goodstein } 2080cd1ad2dSMordechay Goodstein 2090cd1ad2dSMordechay Goodstein /* 2100cd1ad2dSMordechay Goodstein * Add a TB and if needed apply the FH HW bug workaround; 2110cd1ad2dSMordechay Goodstein * meta != 
NULL indicates that it's a page mapping and we 2120cd1ad2dSMordechay Goodstein * need to dma_unmap_page() and set the meta->tbs bit in 2130cd1ad2dSMordechay Goodstein * this case. 2140cd1ad2dSMordechay Goodstein */ 2150cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, 2160cd1ad2dSMordechay Goodstein struct sk_buff *skb, 2170cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, 2180cd1ad2dSMordechay Goodstein dma_addr_t phys, void *virt, 2190cd1ad2dSMordechay Goodstein u16 len, struct iwl_cmd_meta *meta) 2200cd1ad2dSMordechay Goodstein { 2210cd1ad2dSMordechay Goodstein dma_addr_t oldphys = phys; 2220cd1ad2dSMordechay Goodstein struct page *page; 2230cd1ad2dSMordechay Goodstein int ret; 2240cd1ad2dSMordechay Goodstein 2250cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, phys))) 2260cd1ad2dSMordechay Goodstein return -ENOMEM; 2270cd1ad2dSMordechay Goodstein 2280cd1ad2dSMordechay Goodstein if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { 2290cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 2300cd1ad2dSMordechay Goodstein 2310cd1ad2dSMordechay Goodstein if (ret < 0) 2320cd1ad2dSMordechay Goodstein goto unmap; 2330cd1ad2dSMordechay Goodstein 2340cd1ad2dSMordechay Goodstein if (meta) 2350cd1ad2dSMordechay Goodstein meta->tbs |= BIT(ret); 2360cd1ad2dSMordechay Goodstein 2370cd1ad2dSMordechay Goodstein ret = 0; 2380cd1ad2dSMordechay Goodstein goto trace; 2390cd1ad2dSMordechay Goodstein } 2400cd1ad2dSMordechay Goodstein 2410cd1ad2dSMordechay Goodstein /* 2420cd1ad2dSMordechay Goodstein * Work around a hardware bug. If (as expressed in the 2430cd1ad2dSMordechay Goodstein * condition above) the TB ends on a 32-bit boundary, 2440cd1ad2dSMordechay Goodstein * then the next TB may be accessed with the wrong 2450cd1ad2dSMordechay Goodstein * address. 
2460cd1ad2dSMordechay Goodstein * To work around it, copy the data elsewhere and make 2470cd1ad2dSMordechay Goodstein * a new mapping for it so the device will not fail. 2480cd1ad2dSMordechay Goodstein */ 2490cd1ad2dSMordechay Goodstein 2500cd1ad2dSMordechay Goodstein if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { 2510cd1ad2dSMordechay Goodstein ret = -ENOBUFS; 2520cd1ad2dSMordechay Goodstein goto unmap; 2530cd1ad2dSMordechay Goodstein } 2540cd1ad2dSMordechay Goodstein 2550cd1ad2dSMordechay Goodstein page = get_workaround_page(trans, skb); 2560cd1ad2dSMordechay Goodstein if (!page) { 2570cd1ad2dSMordechay Goodstein ret = -ENOMEM; 2580cd1ad2dSMordechay Goodstein goto unmap; 2590cd1ad2dSMordechay Goodstein } 2600cd1ad2dSMordechay Goodstein 2610cd1ad2dSMordechay Goodstein memcpy(page_address(page), virt, len); 2620cd1ad2dSMordechay Goodstein 2630cd1ad2dSMordechay Goodstein phys = dma_map_single(trans->dev, page_address(page), len, 2640cd1ad2dSMordechay Goodstein DMA_TO_DEVICE); 2650cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, phys))) 2660cd1ad2dSMordechay Goodstein return -ENOMEM; 2670cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 2680cd1ad2dSMordechay Goodstein if (ret < 0) { 2690cd1ad2dSMordechay Goodstein /* unmap the new allocation as single */ 2700cd1ad2dSMordechay Goodstein oldphys = phys; 2710cd1ad2dSMordechay Goodstein meta = NULL; 2720cd1ad2dSMordechay Goodstein goto unmap; 2730cd1ad2dSMordechay Goodstein } 2740cd1ad2dSMordechay Goodstein IWL_WARN(trans, 2750cd1ad2dSMordechay Goodstein "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", 2760cd1ad2dSMordechay Goodstein len, (unsigned long long)oldphys, (unsigned long long)phys); 2770cd1ad2dSMordechay Goodstein 2780cd1ad2dSMordechay Goodstein ret = 0; 2790cd1ad2dSMordechay Goodstein unmap: 2800cd1ad2dSMordechay Goodstein if (meta) 2810cd1ad2dSMordechay Goodstein dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); 2820cd1ad2dSMordechay 
Goodstein else 2830cd1ad2dSMordechay Goodstein dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); 2840cd1ad2dSMordechay Goodstein trace: 2850cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); 2860cd1ad2dSMordechay Goodstein 2870cd1ad2dSMordechay Goodstein return ret; 2880cd1ad2dSMordechay Goodstein } 2890cd1ad2dSMordechay Goodstein 2900cd1ad2dSMordechay Goodstein #ifdef CONFIG_INET 2910cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, 2920cd1ad2dSMordechay Goodstein struct sk_buff *skb) 2930cd1ad2dSMordechay Goodstein { 2940cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); 2950cd1ad2dSMordechay Goodstein struct page **page_ptr; 2960cd1ad2dSMordechay Goodstein 2970cd1ad2dSMordechay Goodstein page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 2980cd1ad2dSMordechay Goodstein 2990cd1ad2dSMordechay Goodstein if (WARN_ON(*page_ptr)) 3000cd1ad2dSMordechay Goodstein return NULL; 3010cd1ad2dSMordechay Goodstein 3020cd1ad2dSMordechay Goodstein if (!p->page) 3030cd1ad2dSMordechay Goodstein goto alloc; 3040cd1ad2dSMordechay Goodstein 3050cd1ad2dSMordechay Goodstein /* 3060cd1ad2dSMordechay Goodstein * Check if there's enough room on this page 3070cd1ad2dSMordechay Goodstein * 3080cd1ad2dSMordechay Goodstein * Note that we put a page chaining pointer *last* in the 3090cd1ad2dSMordechay Goodstein * page - we need it somewhere, and if it's there then we 3100cd1ad2dSMordechay Goodstein * avoid DMA mapping the last bits of the page which may 3110cd1ad2dSMordechay Goodstein * trigger the 32-bit boundary hardware bug. 
3120cd1ad2dSMordechay Goodstein * 3130cd1ad2dSMordechay Goodstein * (see also get_workaround_page() in tx-gen2.c) 3140cd1ad2dSMordechay Goodstein */ 3150cd1ad2dSMordechay Goodstein if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - 3160cd1ad2dSMordechay Goodstein sizeof(void *)) 3170cd1ad2dSMordechay Goodstein goto out; 3180cd1ad2dSMordechay Goodstein 3190cd1ad2dSMordechay Goodstein /* We don't have enough room on this page, get a new one. */ 3200cd1ad2dSMordechay Goodstein __free_page(p->page); 3210cd1ad2dSMordechay Goodstein 3220cd1ad2dSMordechay Goodstein alloc: 3230cd1ad2dSMordechay Goodstein p->page = alloc_page(GFP_ATOMIC); 3240cd1ad2dSMordechay Goodstein if (!p->page) 3250cd1ad2dSMordechay Goodstein return NULL; 3260cd1ad2dSMordechay Goodstein p->pos = page_address(p->page); 3270cd1ad2dSMordechay Goodstein /* set the chaining pointer to NULL */ 3283827cb59SJohannes Berg *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; 3290cd1ad2dSMordechay Goodstein out: 3300cd1ad2dSMordechay Goodstein *page_ptr = p->page; 3310cd1ad2dSMordechay Goodstein get_page(p->page); 3320cd1ad2dSMordechay Goodstein return p; 3330cd1ad2dSMordechay Goodstein } 3340cd1ad2dSMordechay Goodstein #endif 3350cd1ad2dSMordechay Goodstein 3360cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, 3370cd1ad2dSMordechay Goodstein struct sk_buff *skb, 3380cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, int start_len, 3390cd1ad2dSMordechay Goodstein u8 hdr_len, 3400cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd) 3410cd1ad2dSMordechay Goodstein { 3420cd1ad2dSMordechay Goodstein #ifdef CONFIG_INET 3430cd1ad2dSMordechay Goodstein struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; 3440cd1ad2dSMordechay Goodstein struct ieee80211_hdr *hdr = (void *)skb->data; 3450cd1ad2dSMordechay Goodstein unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 3460cd1ad2dSMordechay Goodstein unsigned int mss = 
skb_shinfo(skb)->gso_size; 3470cd1ad2dSMordechay Goodstein u16 length, amsdu_pad; 3480cd1ad2dSMordechay Goodstein u8 *start_hdr; 3490cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *hdr_page; 3500cd1ad2dSMordechay Goodstein struct tso_t tso; 3510cd1ad2dSMordechay Goodstein 3520cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), 3530cd1ad2dSMordechay Goodstein &dev_cmd->hdr, start_len, 0); 3540cd1ad2dSMordechay Goodstein 3550cd1ad2dSMordechay Goodstein ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 3560cd1ad2dSMordechay Goodstein snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 3570cd1ad2dSMordechay Goodstein total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; 3580cd1ad2dSMordechay Goodstein amsdu_pad = 0; 3590cd1ad2dSMordechay Goodstein 3600cd1ad2dSMordechay Goodstein /* total amount of header we may need for this A-MSDU */ 3610cd1ad2dSMordechay Goodstein hdr_room = DIV_ROUND_UP(total_len, mss) * 3620cd1ad2dSMordechay Goodstein (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); 3630cd1ad2dSMordechay Goodstein 3640cd1ad2dSMordechay Goodstein /* Our device supports 9 segments at most, it will fit in 1 page */ 3650cd1ad2dSMordechay Goodstein hdr_page = get_page_hdr(trans, hdr_room, skb); 3660cd1ad2dSMordechay Goodstein if (!hdr_page) 3670cd1ad2dSMordechay Goodstein return -ENOMEM; 3680cd1ad2dSMordechay Goodstein 3690cd1ad2dSMordechay Goodstein start_hdr = hdr_page->pos; 3700cd1ad2dSMordechay Goodstein 3710cd1ad2dSMordechay Goodstein /* 3720cd1ad2dSMordechay Goodstein * Pull the ieee80211 header to be able to use TSO core, 3730cd1ad2dSMordechay Goodstein * we will restore it for the tx_status flow. 
3740cd1ad2dSMordechay Goodstein */ 3750cd1ad2dSMordechay Goodstein skb_pull(skb, hdr_len); 3760cd1ad2dSMordechay Goodstein 3770cd1ad2dSMordechay Goodstein /* 3780cd1ad2dSMordechay Goodstein * Remove the length of all the headers that we don't actually 3790cd1ad2dSMordechay Goodstein * have in the MPDU by themselves, but that we duplicate into 3800cd1ad2dSMordechay Goodstein * all the different MSDUs inside the A-MSDU. 3810cd1ad2dSMordechay Goodstein */ 3820cd1ad2dSMordechay Goodstein le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 3830cd1ad2dSMordechay Goodstein 3840cd1ad2dSMordechay Goodstein tso_start(skb, &tso); 3850cd1ad2dSMordechay Goodstein 3860cd1ad2dSMordechay Goodstein while (total_len) { 3870cd1ad2dSMordechay Goodstein /* this is the data left for this subframe */ 3880cd1ad2dSMordechay Goodstein unsigned int data_left = min_t(unsigned int, mss, total_len); 3890cd1ad2dSMordechay Goodstein unsigned int tb_len; 3900cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 3910cd1ad2dSMordechay Goodstein u8 *subf_hdrs_start = hdr_page->pos; 3920cd1ad2dSMordechay Goodstein 3930cd1ad2dSMordechay Goodstein total_len -= data_left; 3940cd1ad2dSMordechay Goodstein 3950cd1ad2dSMordechay Goodstein memset(hdr_page->pos, 0, amsdu_pad); 3960cd1ad2dSMordechay Goodstein hdr_page->pos += amsdu_pad; 3970cd1ad2dSMordechay Goodstein amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 3980cd1ad2dSMordechay Goodstein data_left)) & 0x3; 3990cd1ad2dSMordechay Goodstein ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 4000cd1ad2dSMordechay Goodstein hdr_page->pos += ETH_ALEN; 4010cd1ad2dSMordechay Goodstein ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 4020cd1ad2dSMordechay Goodstein hdr_page->pos += ETH_ALEN; 4030cd1ad2dSMordechay Goodstein 4040cd1ad2dSMordechay Goodstein length = snap_ip_tcp_hdrlen + data_left; 4050cd1ad2dSMordechay Goodstein *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 4060cd1ad2dSMordechay Goodstein hdr_page->pos += sizeof(length); 
4070cd1ad2dSMordechay Goodstein 4080cd1ad2dSMordechay Goodstein /* 4090cd1ad2dSMordechay Goodstein * This will copy the SNAP as well which will be considered 4100cd1ad2dSMordechay Goodstein * as MAC header. 4110cd1ad2dSMordechay Goodstein */ 4120cd1ad2dSMordechay Goodstein tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 4130cd1ad2dSMordechay Goodstein 4140cd1ad2dSMordechay Goodstein hdr_page->pos += snap_ip_tcp_hdrlen; 4150cd1ad2dSMordechay Goodstein 4160cd1ad2dSMordechay Goodstein tb_len = hdr_page->pos - start_hdr; 4170cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, start_hdr, 4180cd1ad2dSMordechay Goodstein tb_len, DMA_TO_DEVICE); 419fb54b863SJohannes Berg if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 4200cd1ad2dSMordechay Goodstein goto out_err; 4210cd1ad2dSMordechay Goodstein /* 4220cd1ad2dSMordechay Goodstein * No need for _with_wa, this is from the TSO page and 4230cd1ad2dSMordechay Goodstein * we leave some space at the end of it so can't hit 4240cd1ad2dSMordechay Goodstein * the buggy scenario. 
4250cd1ad2dSMordechay Goodstein */ 4260cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); 4270cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 4280cd1ad2dSMordechay Goodstein tb_phys, tb_len); 4290cd1ad2dSMordechay Goodstein /* add this subframe's headers' length to the tx_cmd */ 4300cd1ad2dSMordechay Goodstein le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 4310cd1ad2dSMordechay Goodstein 4320cd1ad2dSMordechay Goodstein /* prepare the start_hdr for the next subframe */ 4330cd1ad2dSMordechay Goodstein start_hdr = hdr_page->pos; 4340cd1ad2dSMordechay Goodstein 4350cd1ad2dSMordechay Goodstein /* put the payload */ 4360cd1ad2dSMordechay Goodstein while (data_left) { 4370cd1ad2dSMordechay Goodstein int ret; 4380cd1ad2dSMordechay Goodstein 4390cd1ad2dSMordechay Goodstein tb_len = min_t(unsigned int, tso.size, data_left); 4400cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, tso.data, 4410cd1ad2dSMordechay Goodstein tb_len, DMA_TO_DEVICE); 4420cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, 4430cd1ad2dSMordechay Goodstein tb_phys, tso.data, 4440cd1ad2dSMordechay Goodstein tb_len, NULL); 445fb54b863SJohannes Berg if (ret) 4460cd1ad2dSMordechay Goodstein goto out_err; 4470cd1ad2dSMordechay Goodstein 4480cd1ad2dSMordechay Goodstein data_left -= tb_len; 4490cd1ad2dSMordechay Goodstein tso_build_data(skb, &tso, tb_len); 4500cd1ad2dSMordechay Goodstein } 4510cd1ad2dSMordechay Goodstein } 4520cd1ad2dSMordechay Goodstein 4530cd1ad2dSMordechay Goodstein /* re -add the WiFi header */ 4540cd1ad2dSMordechay Goodstein skb_push(skb, hdr_len); 4550cd1ad2dSMordechay Goodstein 4560cd1ad2dSMordechay Goodstein return 0; 4570cd1ad2dSMordechay Goodstein 4580cd1ad2dSMordechay Goodstein out_err: 4590cd1ad2dSMordechay Goodstein #endif 4600cd1ad2dSMordechay Goodstein return -EINVAL; 4610cd1ad2dSMordechay Goodstein } 4620cd1ad2dSMordechay Goodstein 4630cd1ad2dSMordechay Goodstein 
static struct 4640cd1ad2dSMordechay Goodstein iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, 4650cd1ad2dSMordechay Goodstein struct iwl_txq *txq, 4660cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, 4670cd1ad2dSMordechay Goodstein struct sk_buff *skb, 4680cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta, 4690cd1ad2dSMordechay Goodstein int hdr_len, 4700cd1ad2dSMordechay Goodstein int tx_cmd_len) 4710cd1ad2dSMordechay Goodstein { 4720cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 4730cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 4740cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 4750cd1ad2dSMordechay Goodstein int len; 4760cd1ad2dSMordechay Goodstein void *tb1_addr; 4770cd1ad2dSMordechay Goodstein 4780cd1ad2dSMordechay Goodstein tb_phys = iwl_txq_get_first_tb_dma(txq, idx); 4790cd1ad2dSMordechay Goodstein 4800cd1ad2dSMordechay Goodstein /* 4810cd1ad2dSMordechay Goodstein * No need for _with_wa, the first TB allocation is aligned up 4820cd1ad2dSMordechay Goodstein * to a 64-byte boundary and thus can't be at the end or cross 4830cd1ad2dSMordechay Goodstein * a page boundary (much less a 2^32 boundary). 
4840cd1ad2dSMordechay Goodstein */ 4850cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); 4860cd1ad2dSMordechay Goodstein 4870cd1ad2dSMordechay Goodstein /* 4880cd1ad2dSMordechay Goodstein * The second TB (tb1) points to the remainder of the TX command 4890cd1ad2dSMordechay Goodstein * and the 802.11 header - dword aligned size 4900cd1ad2dSMordechay Goodstein * (This calculation modifies the TX command, so do it before the 4910cd1ad2dSMordechay Goodstein * setup of the first TB) 4920cd1ad2dSMordechay Goodstein */ 4930cd1ad2dSMordechay Goodstein len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - 4940cd1ad2dSMordechay Goodstein IWL_FIRST_TB_SIZE; 4950cd1ad2dSMordechay Goodstein 4960cd1ad2dSMordechay Goodstein /* do not align A-MSDU to dword as the subframe header aligns it */ 4970cd1ad2dSMordechay Goodstein 4980cd1ad2dSMordechay Goodstein /* map the data for TB1 */ 4990cd1ad2dSMordechay Goodstein tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 5000cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); 5010cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 5020cd1ad2dSMordechay Goodstein goto out_err; 5030cd1ad2dSMordechay Goodstein /* 5040cd1ad2dSMordechay Goodstein * No need for _with_wa(), we ensure (via alignment) that the data 5050cd1ad2dSMordechay Goodstein * here can never cross or end at a page boundary. 
 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

/*
 * iwl_txq_gen2_tx_add_frags - add the skb's page fragments to a TFD
 *
 * DMA-maps every non-empty page fragment of @skb and appends each as a
 * transfer buffer (TB) to @tfd via the _with_wa() helper (which applies the
 * hardware boundary workaround). Returns 0 on success or the error from the
 * helper; on error the caller is expected to unmap the whole TFD.
 * NOTE(review): the skb_frag_dma_map() result is passed straight to the
 * helper — presumably mapping errors are detected there; confirm.
 */
static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		/* zero-length fragments carry no data - nothing to map */
		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb_frag_address(frag),
						  fragsz, out_meta);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * iwl_txq_gen2_build_tx - build a gen2 TFD for a regular (non-GSO) frame
 *
 * TB layout produced here:
 *   TB0 - first IWL_FIRST_TB_SIZE bytes of the device command (from the
 *         per-queue first_tb_bufs bounce buffer),
 *   TB1 - remainder of the TX command plus the 802.11 header (dword-aligned
 *         when @pad is set),
 *   TB2 - the rest of the skb head past the 802.11 header (if any),
 *   then one TB per page fragment, and the same again for each skb in the
 *   frag list.
 *
 * Returns the filled TFD, or NULL on mapping failure (everything already
 * mapped into the TFD is unmapped before returning).
 */
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	/* page fragments of the main skb */
	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	/* then the head + page fragments of every skb on the frag list */
	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

/*
 * iwl_txq_gen2_build_tfd - build a TFD for one frame, dispatching by type
 *
 * Chooses the gen2 vs gen3 TX command size by device family, then builds
 * either an A-MSDU TFD (only for GSO-originated A-MSDUs) or a regular TFD.
 * Padding of TB1 is applied only for non-A-MSDU frames (!amsdu).
 */
static
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
		     IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
		     IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}

/*
 * iwl_txq_space - return the number of TFD slots still free on queue @q
 */
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
	       (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

/*
 * iwl_txq_gen2_tx - transmit one frame on a gen2 TX queue
 *
 * Validates the queue, optionally linearizes an over-fragmented skb, and
 * under txq->lock: builds the TFD, fills the byte-count table entry, arms
 * the stuck-queue timer if the queue was empty, and bumps the write pointer
 * to hand the TFD to the device. When the queue is (nearly) full the frame
 * is parked on txq->overflow_q instead and 0 is still returned.
 *
 * Returns 0 on success/queued, -EINVAL/-ENOMEM on early failures, and -1
 * (not an errno) when TFD building fails - callers treat nonzero as failure.
 */
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	/* too many fragments for the hardware - fall back to a linear skb */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			/* stash the command in skb->cb so it can be
			 * retrieved when the overflow queue is drained */
			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 *
 * Walks the queue from read_ptr to write_ptr freeing each TFD (and, on data
 * queues, the TSO page chain of its skb), then drops everything still on the
 * overflow queue, and finally wakes the queue in case it had been stopped.
 */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (!WARN_ON_ONCE(!skb))
				iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_txq_gen2_free_memory - release a queue's DMA and host allocations
 *
 * Frees the TFD ring, the TB0 bounce buffers, the entries array, the
 * byte-count table (back into its DMA pool) and the txq struct itself.
 * The TFD ring and TB0 buffers were allocated together, so both are freed
 * under the single txq->tfds check.
 */
static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}

/*
 * iwl_txq_gen2_free - Deallocate DMA queue.
 * @txq_id: Transmit queue number to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers (command buffers use kfree_sensitive since they may
 * contain key material).
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans->txqs.queue_used);
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

/*
 * iwl_txq_init - software-initialize an already-allocated TX queue
 *
 * Validates the global TFD queue size, sets the watermarks/pointers, and
 * initializes the lock and overflow queue. The command queue gets its own
 * lockdep class since it is locked in a different context than data queues.
 */
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

/*
 * iwl_txq_free_tso_page - free the TSO page chain attached to an skb
 *
 * The driver stores a page pointer in skb->cb (at trans->txqs.page_offs);
 * each page links to the next via a pointer stored in its last
 * sizeof(void *) bytes. Walk and free the whole chain, clearing the cb slot.
 */
void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		/* the next-page pointer lives at the very end of the page */
		next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

/*
 * iwl_txq_log_scd_error - log diagnostics for a stuck TX queue
 *
 * On gen2 devices only the SW pointers are printed (new SCD register dump is
 * still TODO); on older devices the scheduler status, FIFO and HW pointers
 * are read from periphery registers and logged.
 */
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->gen2) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

/*
 * iwl_txq_stuck_timer - watchdog fired: queue made no progress in wd_timeout
 *
 * If the queue turns out to be empty the trigger was spurious and we just
 * return; otherwise log the scheduler state and force an NMI so the firmware
 * produces an error dump.
 */
static void iwl_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_txq_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_txq_set_tfd_invalid_gen1 - point a gen1 TFD at the invalid TX command
 *
 * Used to initialize unused TFD slots so the hardware never DMAs from a
 * stale/random address.
 */
static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
					 struct iwl_tfd *tfd)
{
	tfd->num_tbs = 0;

	iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
				 trans->invalid_tx_cmd.size);
}

/*
 * iwl_txq_alloc - allocate the DMA ring and host bookkeeping for a TX queue
 *
 * On gen2 the TFD ring has slots_num entries; on older devices it always has
 * max_tfd_queue_size entries (only n_window of which are used by software).
 * Allocates the entries array (plus command buffers for the command queue),
 * the device-shared TFD ring and the TB0 bounce buffers, then marks every
 * TFD invalid. Returns 0 or -EINVAL/-ENOMEM; all partial allocations are
 * released on failure.
 */
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue)
{
	size_t num_entries = trans->trans_cfg->gen2 ?
		slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tfd_sz;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
		return -EINVAL;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	tfd_sz = trans->txqs.tfd.size * num_entries;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	/* never leave a TFD pointing at arbitrary memory */
	for (i = 0; i < num_entries; i++) {
		void *tfd = iwl_txq_get_tfd(trans, txq, i);

		if (trans->trans_cfg->gen2)
			iwl_txq_set_tfd_invalid_gen2(trans, tfd);
		else
			iwl_txq_set_tfd_invalid_gen1(trans, tfd);
	}

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
	txq->tfds = NULL;
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

/*
 * iwl_txq_dyn_alloc_dma - allocate a dynamically-managed (TVQM) TX queue
 *
 * Allocates the txq struct, its byte-count table from the shared DMA pool,
 * and the ring itself, then initializes it and sets the watchdog timeout.
 * Returns the queue or an ERR_PTR(); never NULL.
 */
static struct iwl_txq *
iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
	size_t bc_tbl_size, bc_tbl_entries;
	struct iwl_txq *txq;
	int ret;

	WARN_ON(!trans->txqs.bc_tbl_size);

	bc_tbl_size = trans->txqs.bc_tbl_size;
	bc_tbl_entries = bc_tbl_size / sizeof(u16);

	/* the byte-count table must cover every slot of the queue */
	if (WARN_ON(size > bc_tbl_entries))
		return ERR_PTR(-EINVAL);

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return ERR_PTR(-ENOMEM);

	txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
					  &txq->bc_tbl.dma);
	if (!txq->bc_tbl.addr) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return ERR_PTR(-ENOMEM);
	}

	ret = iwl_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	return txq;

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ERR_PTR(ret);
}

/*
 * iwl_txq_alloc_response - handle the firmware response to queue allocation
 *
 * Validates the response payload, claims the firmware-assigned queue id,
 * publishes @txq in trans->txqs.txq[] and aligns both SW pointers to the
 * firmware's initial write pointer. Returns the queue id on success; on any
 * failure the response is freed and the queue's memory is released.
 */
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
				  struct iwl_host_cmd *hcmd)
{
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (WARN_ONCE(trans->txqs.txq[qid],
		      "queue %d already allocated\n", qid)) {
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans->txqs.txq[qid] = txq;
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
		      u8 tid, int size, unsigned int timeout)
{
	struct iwl_txq *txq;
union { 1231227f2597SJohannes Berg struct iwl_tx_queue_cfg_cmd old; 1232227f2597SJohannes Berg struct iwl_scd_queue_cfg_cmd new; 1233227f2597SJohannes Berg } cmd; 12340cd1ad2dSMordechay Goodstein struct iwl_host_cmd hcmd = { 12350cd1ad2dSMordechay Goodstein .flags = CMD_WANT_SKB, 12360cd1ad2dSMordechay Goodstein }; 12370cd1ad2dSMordechay Goodstein int ret; 12380cd1ad2dSMordechay Goodstein 1239bb16ffd5SJohannes Berg if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && 1240bb16ffd5SJohannes Berg trans->hw_rev_step == SILICON_A_STEP) 1241bb16ffd5SJohannes Berg size = 4096; 1242bb16ffd5SJohannes Berg 1243ba3d4acdSJohannes Berg txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); 1244ba3d4acdSJohannes Berg if (IS_ERR(txq)) 1245ba3d4acdSJohannes Berg return PTR_ERR(txq); 12460cd1ad2dSMordechay Goodstein 1247227f2597SJohannes Berg if (trans->txqs.queue_alloc_cmd_ver == 0) { 1248227f2597SJohannes Berg memset(&cmd.old, 0, sizeof(cmd.old)); 1249227f2597SJohannes Berg cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); 1250227f2597SJohannes Berg cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); 1251227f2597SJohannes Berg cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); 1252227f2597SJohannes Berg cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); 1253227f2597SJohannes Berg cmd.old.tid = tid; 1254227f2597SJohannes Berg 1255227f2597SJohannes Berg if (hweight32(sta_mask) != 1) { 1256227f2597SJohannes Berg ret = -EINVAL; 1257227f2597SJohannes Berg goto error; 1258227f2597SJohannes Berg } 1259227f2597SJohannes Berg cmd.old.sta_id = ffs(sta_mask) - 1; 1260227f2597SJohannes Berg 1261227f2597SJohannes Berg hcmd.id = SCD_QUEUE_CFG; 1262227f2597SJohannes Berg hcmd.len[0] = sizeof(cmd.old); 1263227f2597SJohannes Berg hcmd.data[0] = &cmd.old; 1264227f2597SJohannes Berg } else if (trans->txqs.queue_alloc_cmd_ver == 3) { 1265227f2597SJohannes Berg memset(&cmd.new, 0, sizeof(cmd.new)); 1266227f2597SJohannes Berg cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); 
1267227f2597SJohannes Berg cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); 1268227f2597SJohannes Berg cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); 1269227f2597SJohannes Berg cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); 1270227f2597SJohannes Berg cmd.new.u.add.flags = cpu_to_le32(flags); 1271227f2597SJohannes Berg cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); 1272227f2597SJohannes Berg cmd.new.u.add.tid = tid; 1273227f2597SJohannes Berg 1274227f2597SJohannes Berg hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); 1275227f2597SJohannes Berg hcmd.len[0] = sizeof(cmd.new); 1276227f2597SJohannes Berg hcmd.data[0] = &cmd.new; 1277227f2597SJohannes Berg } else { 1278227f2597SJohannes Berg ret = -EOPNOTSUPP; 1279227f2597SJohannes Berg goto error; 1280227f2597SJohannes Berg } 12810cd1ad2dSMordechay Goodstein 12820cd1ad2dSMordechay Goodstein ret = iwl_trans_send_cmd(trans, &hcmd); 12830cd1ad2dSMordechay Goodstein if (ret) 12840cd1ad2dSMordechay Goodstein goto error; 12850cd1ad2dSMordechay Goodstein 12860cd1ad2dSMordechay Goodstein return iwl_txq_alloc_response(trans, txq, &hcmd); 12870cd1ad2dSMordechay Goodstein 12880cd1ad2dSMordechay Goodstein error: 12890cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 12900cd1ad2dSMordechay Goodstein return ret; 12910cd1ad2dSMordechay Goodstein } 12920cd1ad2dSMordechay Goodstein 12930cd1ad2dSMordechay Goodstein void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) 12940cd1ad2dSMordechay Goodstein { 12950cd1ad2dSMordechay Goodstein if (WARN(queue >= IWL_MAX_TVQM_QUEUES, 12960cd1ad2dSMordechay Goodstein "queue %d out of range", queue)) 12970cd1ad2dSMordechay Goodstein return; 12980cd1ad2dSMordechay Goodstein 12990cd1ad2dSMordechay Goodstein /* 13000cd1ad2dSMordechay Goodstein * Upon HW Rfkill - we stop the device, and then stop the queues 13010cd1ad2dSMordechay Goodstein * in the op_mode. 
Just for the sake of the simplicity of the op_mode, 13020cd1ad2dSMordechay Goodstein * allow the op_mode to call txq_disable after it already called 13030cd1ad2dSMordechay Goodstein * stop_device. 13040cd1ad2dSMordechay Goodstein */ 13050cd1ad2dSMordechay Goodstein if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { 13060cd1ad2dSMordechay Goodstein WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 13070cd1ad2dSMordechay Goodstein "queue %d not used", queue); 13080cd1ad2dSMordechay Goodstein return; 13090cd1ad2dSMordechay Goodstein } 13100cd1ad2dSMordechay Goodstein 13112f8cfcc4SMordechay Goodstein iwl_txq_gen2_free(trans, queue); 13120cd1ad2dSMordechay Goodstein 13130cd1ad2dSMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); 13140cd1ad2dSMordechay Goodstein } 13150cd1ad2dSMordechay Goodstein 13160cd1ad2dSMordechay Goodstein void iwl_txq_gen2_tx_free(struct iwl_trans *trans) 13170cd1ad2dSMordechay Goodstein { 13180cd1ad2dSMordechay Goodstein int i; 13190cd1ad2dSMordechay Goodstein 13200cd1ad2dSMordechay Goodstein memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 13210cd1ad2dSMordechay Goodstein 13220cd1ad2dSMordechay Goodstein /* Free all TX queues */ 13230cd1ad2dSMordechay Goodstein for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { 13240cd1ad2dSMordechay Goodstein if (!trans->txqs.txq[i]) 13250cd1ad2dSMordechay Goodstein continue; 13260cd1ad2dSMordechay Goodstein 13270cd1ad2dSMordechay Goodstein iwl_txq_gen2_free(trans, i); 13280cd1ad2dSMordechay Goodstein } 13290cd1ad2dSMordechay Goodstein } 13300cd1ad2dSMordechay Goodstein 13310cd1ad2dSMordechay Goodstein int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) 13320cd1ad2dSMordechay Goodstein { 13330cd1ad2dSMordechay Goodstein struct iwl_txq *queue; 13340cd1ad2dSMordechay Goodstein int ret; 13350cd1ad2dSMordechay Goodstein 13360cd1ad2dSMordechay Goodstein /* alloc and init the tx queue */ 13370cd1ad2dSMordechay Goodstein if 
(!trans->txqs.txq[txq_id]) { 13380cd1ad2dSMordechay Goodstein queue = kzalloc(sizeof(*queue), GFP_KERNEL); 13390cd1ad2dSMordechay Goodstein if (!queue) { 13400cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Not enough memory for tx queue\n"); 13410cd1ad2dSMordechay Goodstein return -ENOMEM; 13420cd1ad2dSMordechay Goodstein } 13430cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id] = queue; 13440cd1ad2dSMordechay Goodstein ret = iwl_txq_alloc(trans, queue, queue_size, true); 13450cd1ad2dSMordechay Goodstein if (ret) { 13460cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 13470cd1ad2dSMordechay Goodstein goto error; 13480cd1ad2dSMordechay Goodstein } 13490cd1ad2dSMordechay Goodstein } else { 13500cd1ad2dSMordechay Goodstein queue = trans->txqs.txq[txq_id]; 13510cd1ad2dSMordechay Goodstein } 13520cd1ad2dSMordechay Goodstein 13530cd1ad2dSMordechay Goodstein ret = iwl_txq_init(trans, queue, queue_size, 13540cd1ad2dSMordechay Goodstein (txq_id == trans->txqs.cmd.q_id)); 13550cd1ad2dSMordechay Goodstein if (ret) { 13560cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 13570cd1ad2dSMordechay Goodstein goto error; 13580cd1ad2dSMordechay Goodstein } 13590cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id]->id = txq_id; 13600cd1ad2dSMordechay Goodstein set_bit(txq_id, trans->txqs.queue_used); 13610cd1ad2dSMordechay Goodstein 13620cd1ad2dSMordechay Goodstein return 0; 13630cd1ad2dSMordechay Goodstein 13640cd1ad2dSMordechay Goodstein error: 13650cd1ad2dSMordechay Goodstein iwl_txq_gen2_tx_free(trans); 13660cd1ad2dSMordechay Goodstein return ret; 13670cd1ad2dSMordechay Goodstein } 13680cd1ad2dSMordechay Goodstein 13690179bfffSMordechay Goodstein static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, 1370a0632004SJohannes Berg struct iwl_tfd *tfd, u8 idx) 13710179bfffSMordechay Goodstein { 1372a0632004SJohannes Berg struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 13730179bfffSMordechay Goodstein 
dma_addr_t addr; 13740179bfffSMordechay Goodstein dma_addr_t hi_len; 13750179bfffSMordechay Goodstein 13760179bfffSMordechay Goodstein addr = get_unaligned_le32(&tb->lo); 13770179bfffSMordechay Goodstein 13780179bfffSMordechay Goodstein if (sizeof(dma_addr_t) <= sizeof(u32)) 13790179bfffSMordechay Goodstein return addr; 13800179bfffSMordechay Goodstein 13810179bfffSMordechay Goodstein hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 13820179bfffSMordechay Goodstein 13830179bfffSMordechay Goodstein /* 13840179bfffSMordechay Goodstein * shift by 16 twice to avoid warnings on 32-bit 13850179bfffSMordechay Goodstein * (where this code never runs anyway due to the 13860179bfffSMordechay Goodstein * if statement above) 13870179bfffSMordechay Goodstein */ 13880179bfffSMordechay Goodstein return addr | ((hi_len << 16) << 16); 13890179bfffSMordechay Goodstein } 13900179bfffSMordechay Goodstein 13910179bfffSMordechay Goodstein void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, 13920179bfffSMordechay Goodstein struct iwl_cmd_meta *meta, 13930179bfffSMordechay Goodstein struct iwl_txq *txq, int index) 13940179bfffSMordechay Goodstein { 13950179bfffSMordechay Goodstein int i, num_tbs; 1396a0632004SJohannes Berg struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); 13970179bfffSMordechay Goodstein 13980179bfffSMordechay Goodstein /* Sanity check on number of chunks */ 13990179bfffSMordechay Goodstein num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); 14000179bfffSMordechay Goodstein 14010179bfffSMordechay Goodstein if (num_tbs > trans->txqs.tfd.max_tbs) { 14020179bfffSMordechay Goodstein IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 14030179bfffSMordechay Goodstein /* @todo issue fatal error, it is quite serious situation */ 14040179bfffSMordechay Goodstein return; 14050179bfffSMordechay Goodstein } 14060179bfffSMordechay Goodstein 14070179bfffSMordechay Goodstein /* first TB is never freed - it's the bidirectional DMA data */ 14080179bfffSMordechay Goodstein 
14090179bfffSMordechay Goodstein for (i = 1; i < num_tbs; i++) { 14100179bfffSMordechay Goodstein if (meta->tbs & BIT(i)) 14110179bfffSMordechay Goodstein dma_unmap_page(trans->dev, 14120179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_addr(trans, 14130179bfffSMordechay Goodstein tfd, i), 14140179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_len(trans, 14150179bfffSMordechay Goodstein tfd, i), 14160179bfffSMordechay Goodstein DMA_TO_DEVICE); 14170179bfffSMordechay Goodstein else 14180179bfffSMordechay Goodstein dma_unmap_single(trans->dev, 14190179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_addr(trans, 14200179bfffSMordechay Goodstein tfd, i), 14210179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_len(trans, 14220179bfffSMordechay Goodstein tfd, i), 14230179bfffSMordechay Goodstein DMA_TO_DEVICE); 14240179bfffSMordechay Goodstein } 14250179bfffSMordechay Goodstein 14260179bfffSMordechay Goodstein meta->tbs = 0; 14270179bfffSMordechay Goodstein 1428c83031afSJohannes Berg iwl_txq_set_tfd_invalid_gen1(trans, tfd); 14290179bfffSMordechay Goodstein } 14300179bfffSMordechay Goodstein 14310179bfffSMordechay Goodstein #define IWL_TX_CRC_SIZE 4 14320179bfffSMordechay Goodstein #define IWL_TX_DELIMITER_SIZE 4 14330179bfffSMordechay Goodstein 14340179bfffSMordechay Goodstein /* 14350179bfffSMordechay Goodstein * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array 14360179bfffSMordechay Goodstein */ 14370179bfffSMordechay Goodstein void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, 14380179bfffSMordechay Goodstein struct iwl_txq *txq, u16 byte_cnt, 14390179bfffSMordechay Goodstein int num_tbs) 14400179bfffSMordechay Goodstein { 14410179bfffSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl; 14420179bfffSMordechay Goodstein int write_ptr = txq->write_ptr; 14430179bfffSMordechay Goodstein int txq_id = txq->id; 14440179bfffSMordechay Goodstein u8 sec_ctl = 0; 14450179bfffSMordechay Goodstein u16 len = byte_cnt + IWL_TX_CRC_SIZE + 
IWL_TX_DELIMITER_SIZE; 14460179bfffSMordechay Goodstein __le16 bc_ent; 14470179bfffSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; 14480179bfffSMordechay Goodstein struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 14490179bfffSMordechay Goodstein u8 sta_id = tx_cmd->sta_id; 14500179bfffSMordechay Goodstein 14510179bfffSMordechay Goodstein scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 14520179bfffSMordechay Goodstein 14530179bfffSMordechay Goodstein sec_ctl = tx_cmd->sec_ctl; 14540179bfffSMordechay Goodstein 14550179bfffSMordechay Goodstein switch (sec_ctl & TX_CMD_SEC_MSK) { 14560179bfffSMordechay Goodstein case TX_CMD_SEC_CCM: 14570179bfffSMordechay Goodstein len += IEEE80211_CCMP_MIC_LEN; 14580179bfffSMordechay Goodstein break; 14590179bfffSMordechay Goodstein case TX_CMD_SEC_TKIP: 14600179bfffSMordechay Goodstein len += IEEE80211_TKIP_ICV_LEN; 14610179bfffSMordechay Goodstein break; 14620179bfffSMordechay Goodstein case TX_CMD_SEC_WEP: 14630179bfffSMordechay Goodstein len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; 14640179bfffSMordechay Goodstein break; 14650179bfffSMordechay Goodstein } 14660179bfffSMordechay Goodstein if (trans->txqs.bc_table_dword) 14670179bfffSMordechay Goodstein len = DIV_ROUND_UP(len, 4); 14680179bfffSMordechay Goodstein 14690179bfffSMordechay Goodstein if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) 14700179bfffSMordechay Goodstein return; 14710179bfffSMordechay Goodstein 14720179bfffSMordechay Goodstein bc_ent = cpu_to_le16(len | (sta_id << 12)); 14730179bfffSMordechay Goodstein 14740179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 14750179bfffSMordechay Goodstein 14760179bfffSMordechay Goodstein if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 14770179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = 14780179bfffSMordechay Goodstein bc_ent; 14790179bfffSMordechay Goodstein } 14800179bfffSMordechay Goodstein 
14810179bfffSMordechay Goodstein void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, 14820179bfffSMordechay Goodstein struct iwl_txq *txq) 14830179bfffSMordechay Goodstein { 14840179bfffSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 14850179bfffSMordechay Goodstein int txq_id = txq->id; 14860179bfffSMordechay Goodstein int read_ptr = txq->read_ptr; 14870179bfffSMordechay Goodstein u8 sta_id = 0; 14880179bfffSMordechay Goodstein __le16 bc_ent; 14890179bfffSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; 14900179bfffSMordechay Goodstein struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 14910179bfffSMordechay Goodstein 14920179bfffSMordechay Goodstein WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 14930179bfffSMordechay Goodstein 14940179bfffSMordechay Goodstein if (txq_id != trans->txqs.cmd.q_id) 14950179bfffSMordechay Goodstein sta_id = tx_cmd->sta_id; 14960179bfffSMordechay Goodstein 14970179bfffSMordechay Goodstein bc_ent = cpu_to_le16(1 | (sta_id << 12)); 14980179bfffSMordechay Goodstein 14990179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 15000179bfffSMordechay Goodstein 15010179bfffSMordechay Goodstein if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 15020179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = 15030179bfffSMordechay Goodstein bc_ent; 15040179bfffSMordechay Goodstein } 1505a4450980SMordechay Goodstein 1506a4450980SMordechay Goodstein /* 1507a4450980SMordechay Goodstein * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 1508a4450980SMordechay Goodstein * @trans - transport private data 1509a4450980SMordechay Goodstein * @txq - tx queue 1510a4450980SMordechay Goodstein * @dma_dir - the direction of the DMA mapping 1511a4450980SMordechay Goodstein * 1512a4450980SMordechay Goodstein * Does NOT advance any TFD circular buffer read/write indexes 1513a4450980SMordechay Goodstein * Does NOT 
free the TFD itself (which is within circular buffer) 1514a4450980SMordechay Goodstein */ 1515a4450980SMordechay Goodstein void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 1516a4450980SMordechay Goodstein { 1517a4450980SMordechay Goodstein /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 1518a4450980SMordechay Goodstein * idx is bounded by n_window 1519a4450980SMordechay Goodstein */ 1520a4450980SMordechay Goodstein int rd_ptr = txq->read_ptr; 1521a4450980SMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, rd_ptr); 15220f8d5656SEmmanuel Grumbach struct sk_buff *skb; 1523a4450980SMordechay Goodstein 1524a4450980SMordechay Goodstein lockdep_assert_held(&txq->lock); 1525a4450980SMordechay Goodstein 15260f8d5656SEmmanuel Grumbach if (!txq->entries) 15270f8d5656SEmmanuel Grumbach return; 15280f8d5656SEmmanuel Grumbach 1529a4450980SMordechay Goodstein /* We have only q->n_window txq->entries, but we use 1530a4450980SMordechay Goodstein * TFD_QUEUE_SIZE_MAX tfds 1531a4450980SMordechay Goodstein */ 1532a0632004SJohannes Berg if (trans->trans_cfg->gen2) 1533a0632004SJohannes Berg iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 1534a0632004SJohannes Berg iwl_txq_get_tfd(trans, txq, rd_ptr)); 1535a0632004SJohannes Berg else 1536a0632004SJohannes Berg iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, 1537a0632004SJohannes Berg txq, rd_ptr); 1538a4450980SMordechay Goodstein 1539a4450980SMordechay Goodstein /* free SKB */ 1540a4450980SMordechay Goodstein skb = txq->entries[idx].skb; 1541a4450980SMordechay Goodstein 1542a4450980SMordechay Goodstein /* Can be called from irqs-disabled context 1543a4450980SMordechay Goodstein * If skb is not NULL, it means that the whole queue is being 1544a4450980SMordechay Goodstein * freed and that the queue is not empty - free the skb 1545a4450980SMordechay Goodstein */ 1546a4450980SMordechay Goodstein if (skb) { 1547a4450980SMordechay Goodstein iwl_op_mode_free_skb(trans->op_mode, skb); 1548a4450980SMordechay 
Goodstein txq->entries[idx].skb = NULL; 1549a4450980SMordechay Goodstein } 1550a4450980SMordechay Goodstein } 1551a4450980SMordechay Goodstein 1552a4450980SMordechay Goodstein void iwl_txq_progress(struct iwl_txq *txq) 1553a4450980SMordechay Goodstein { 1554a4450980SMordechay Goodstein lockdep_assert_held(&txq->lock); 1555a4450980SMordechay Goodstein 1556a4450980SMordechay Goodstein if (!txq->wd_timeout) 1557a4450980SMordechay Goodstein return; 1558a4450980SMordechay Goodstein 1559a4450980SMordechay Goodstein /* 1560a4450980SMordechay Goodstein * station is asleep and we send data - that must 1561a4450980SMordechay Goodstein * be uAPSD or PS-Poll. Don't rearm the timer. 1562a4450980SMordechay Goodstein */ 1563a4450980SMordechay Goodstein if (txq->frozen) 1564a4450980SMordechay Goodstein return; 1565a4450980SMordechay Goodstein 1566a4450980SMordechay Goodstein /* 1567a4450980SMordechay Goodstein * if empty delete timer, otherwise move timer forward 1568a4450980SMordechay Goodstein * since we're making progress on this queue 1569a4450980SMordechay Goodstein */ 1570a4450980SMordechay Goodstein if (txq->read_ptr == txq->write_ptr) 1571a4450980SMordechay Goodstein del_timer(&txq->stuck_timer); 1572a4450980SMordechay Goodstein else 1573a4450980SMordechay Goodstein mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1574a4450980SMordechay Goodstein } 1575a4450980SMordechay Goodstein 1576a4450980SMordechay Goodstein /* Frees buffers until index _not_ inclusive */ 1577a4450980SMordechay Goodstein void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 1578*fc163831SMiri Korenblit struct sk_buff_head *skbs, bool is_flush) 1579a4450980SMordechay Goodstein { 1580a4450980SMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[txq_id]; 1581413be839SMiri Korenblit int tfd_num, read_ptr, last_to_free; 1582a4450980SMordechay Goodstein 1583a4450980SMordechay Goodstein /* This function is not meant to release cmd queue*/ 1584a4450980SMordechay Goodstein if 
(WARN_ON(txq_id == trans->txqs.cmd.q_id)) 1585a4450980SMordechay Goodstein return; 1586a4450980SMordechay Goodstein 1587413be839SMiri Korenblit if (WARN_ON(!txq)) 1588413be839SMiri Korenblit return; 1589413be839SMiri Korenblit 1590413be839SMiri Korenblit tfd_num = iwl_txq_get_cmd_index(txq, ssn); 1591413be839SMiri Korenblit read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); 1592413be839SMiri Korenblit 1593a4450980SMordechay Goodstein spin_lock_bh(&txq->lock); 1594a4450980SMordechay Goodstein 1595a4450980SMordechay Goodstein if (!test_bit(txq_id, trans->txqs.queue_used)) { 1596a4450980SMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 1597a4450980SMordechay Goodstein txq_id, ssn); 1598a4450980SMordechay Goodstein goto out; 1599a4450980SMordechay Goodstein } 1600a4450980SMordechay Goodstein 1601a4450980SMordechay Goodstein if (read_ptr == tfd_num) 1602a4450980SMordechay Goodstein goto out; 1603a4450980SMordechay Goodstein 1604a4450980SMordechay Goodstein IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 1605a4450980SMordechay Goodstein txq_id, txq->read_ptr, tfd_num, ssn); 1606a4450980SMordechay Goodstein 1607a4450980SMordechay Goodstein /*Since we free until index _not_ inclusive, the one before index is 1608a4450980SMordechay Goodstein * the last we will free. 
This one must be used */ 1609a4450980SMordechay Goodstein last_to_free = iwl_txq_dec_wrap(trans, tfd_num); 1610a4450980SMordechay Goodstein 1611a4450980SMordechay Goodstein if (!iwl_txq_used(txq, last_to_free)) { 1612a4450980SMordechay Goodstein IWL_ERR(trans, 1613a4450980SMordechay Goodstein "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1614a4450980SMordechay Goodstein __func__, txq_id, last_to_free, 1615a4450980SMordechay Goodstein trans->trans_cfg->base_params->max_tfd_queue_size, 1616a4450980SMordechay Goodstein txq->write_ptr, txq->read_ptr); 16179cd3de81SMordechay Goodstein 16189cd3de81SMordechay Goodstein iwl_op_mode_time_point(trans->op_mode, 16199cd3de81SMordechay Goodstein IWL_FW_INI_TIME_POINT_FAKE_TX, 16209cd3de81SMordechay Goodstein NULL); 1621a4450980SMordechay Goodstein goto out; 1622a4450980SMordechay Goodstein } 1623a4450980SMordechay Goodstein 1624a4450980SMordechay Goodstein if (WARN_ON(!skb_queue_empty(skbs))) 1625a4450980SMordechay Goodstein goto out; 1626a4450980SMordechay Goodstein 1627a4450980SMordechay Goodstein for (; 1628a4450980SMordechay Goodstein read_ptr != tfd_num; 1629a4450980SMordechay Goodstein txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), 1630a4450980SMordechay Goodstein read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { 1631a4450980SMordechay Goodstein struct sk_buff *skb = txq->entries[read_ptr].skb; 1632a4450980SMordechay Goodstein 1633a4450980SMordechay Goodstein if (WARN_ON_ONCE(!skb)) 1634a4450980SMordechay Goodstein continue; 1635a4450980SMordechay Goodstein 1636a4450980SMordechay Goodstein iwl_txq_free_tso_page(trans, skb); 1637a4450980SMordechay Goodstein 1638a4450980SMordechay Goodstein __skb_queue_tail(skbs, skb); 1639a4450980SMordechay Goodstein 1640a4450980SMordechay Goodstein txq->entries[read_ptr].skb = NULL; 1641a4450980SMordechay Goodstein 164212a89f01SJohannes Berg if (!trans->trans_cfg->gen2) 1643a4450980SMordechay Goodstein 
iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); 1644a4450980SMordechay Goodstein 1645a4450980SMordechay Goodstein iwl_txq_free_tfd(trans, txq); 1646a4450980SMordechay Goodstein } 1647a4450980SMordechay Goodstein 1648a4450980SMordechay Goodstein iwl_txq_progress(txq); 1649a4450980SMordechay Goodstein 1650a4450980SMordechay Goodstein if (iwl_txq_space(trans, txq) > txq->low_mark && 1651a4450980SMordechay Goodstein test_bit(txq_id, trans->txqs.queue_stopped)) { 1652a4450980SMordechay Goodstein struct sk_buff_head overflow_skbs; 1653*fc163831SMiri Korenblit struct sk_buff *skb; 1654a4450980SMordechay Goodstein 1655a4450980SMordechay Goodstein __skb_queue_head_init(&overflow_skbs); 1656*fc163831SMiri Korenblit skb_queue_splice_init(&txq->overflow_q, 1657*fc163831SMiri Korenblit is_flush ? skbs : &overflow_skbs); 1658a4450980SMordechay Goodstein 1659a4450980SMordechay Goodstein /* 1660a4450980SMordechay Goodstein * We are going to transmit from the overflow queue. 1661a4450980SMordechay Goodstein * Remember this state so that wait_for_txq_empty will know we 1662a4450980SMordechay Goodstein * are adding more packets to the TFD queue. It cannot rely on 1663a4450980SMordechay Goodstein * the state of &txq->overflow_q, as we just emptied it, but 1664a4450980SMordechay Goodstein * haven't TXed the content yet. 1665a4450980SMordechay Goodstein */ 1666a4450980SMordechay Goodstein txq->overflow_tx = true; 1667a4450980SMordechay Goodstein 1668a4450980SMordechay Goodstein /* 1669a4450980SMordechay Goodstein * This is tricky: we are in reclaim path which is non 1670a4450980SMordechay Goodstein * re-entrant, so noone will try to take the access the 1671a4450980SMordechay Goodstein * txq data from that path. We stopped tx, so we can't 1672a4450980SMordechay Goodstein * have tx as well. Bottom line, we can unlock and re-lock 1673a4450980SMordechay Goodstein * later. 
		 */
		spin_unlock_bh(&txq->lock);

		/*
		 * NOTE(review): this is the tail of a function that begins
		 * before this chunk; it re-transmits frames that were queued
		 * on an overflow list while the TX queue was full.
		 */
		while ((skb = __skb_dequeue(&overflow_skbs))) {
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			/* the TX command pointer was stashed in skb->cb at enqueue time */
			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->txqs.dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

/*
 * Set wr_ptr of specific device and txq.
 * The read pointer is set to the same value, i.e. the queue is made to
 * look empty from the driver's point of view.
 */
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

/*
 * Freeze or wake the stuck-queue timer of every TX queue whose bit is set
 * in @txqs.  Freezing a non-empty queue saves how much time was left on
 * the timer; waking re-arms it with that remainder.
 */
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze)
{
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = trans->txqs.txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		/* nothing to do if the queue is already in the wanted state */
		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		/* empty queue: no stuck timer is running, nothing to save/arm */
		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

/* how long to wait for the firmware to answer a synchronous host command */
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

/*
 * Send a host command and block until the firmware responds or
 * HOST_COMPLETE_TIMEOUT expires.
 *
 * Only one sync command may be in flight at a time; this is enforced with
 * the STATUS_SYNC_HCMD_ACTIVE bit, which the response-handling path clears
 * to wake us.  Returns 0 on success or a negative errno (-EIO, -ETIMEDOUT,
 * -ERFKILL, ...); on any failure after enqueue, the "cancel" path strips
 * CMD_WANT_SKB from the queued entry and frees any response packet.
 */
static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	cmd_idx = trans->ops->send_cmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		/* enqueue failed - release ownership before bailing out */
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		/* kick the firmware so the ensuing dump reflects its state */
		iwl_trans_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		/* a one-shot suppression flag silences the error splat once */
		if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
					&trans->status)) {
			IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
			dump_stack();
		}
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	/* caller asked for the response packet but none arrived */
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

/*
 * Common host-command entry point: validate transport state (dead, rfkill,
 * D3) and then either enqueue the command asynchronously or hand it to
 * iwl_trans_txq_send_hcmd_sync() to wait for the response.
 *
 * Returns 0 on success or a negative errno.
 */
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
			    struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/* drop commands while in rfkill unless explicitly allowed */
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	/* likewise while suspended (D3) unless the command is D3-safe */
	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3))) {
		IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
		return -EHOSTDOWN;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = trans->ops->send_cmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_trans_txq_send_hcmd_sync(trans, cmd);
}