18e99ea8dSJohannes Berg // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 28e99ea8dSJohannes Berg /* 3*ba3d4acdSJohannes Berg * Copyright (C) 2020-2022 Intel Corporation 48e99ea8dSJohannes Berg */ 50cd1ad2dSMordechay Goodstein #include <net/tso.h> 60cd1ad2dSMordechay Goodstein #include <linux/tcp.h> 70cd1ad2dSMordechay Goodstein 80cd1ad2dSMordechay Goodstein #include "iwl-debug.h" 90cd1ad2dSMordechay Goodstein #include "iwl-io.h" 100cd1ad2dSMordechay Goodstein #include "fw/api/tx.h" 110cd1ad2dSMordechay Goodstein #include "queue/tx.h" 120cd1ad2dSMordechay Goodstein #include "iwl-fh.h" 130cd1ad2dSMordechay Goodstein #include "iwl-scd.h" 140cd1ad2dSMordechay Goodstein #include <linux/dmapool.h> 150cd1ad2dSMordechay Goodstein 160cd1ad2dSMordechay Goodstein /* 170cd1ad2dSMordechay Goodstein * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array 180cd1ad2dSMordechay Goodstein */ 190cd1ad2dSMordechay Goodstein static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, 200cd1ad2dSMordechay Goodstein struct iwl_txq *txq, u16 byte_cnt, 210cd1ad2dSMordechay Goodstein int num_tbs) 220cd1ad2dSMordechay Goodstein { 230cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 240cd1ad2dSMordechay Goodstein u8 filled_tfd_size, num_fetch_chunks; 250cd1ad2dSMordechay Goodstein u16 len = byte_cnt; 260cd1ad2dSMordechay Goodstein __le16 bc_ent; 270cd1ad2dSMordechay Goodstein 280cd1ad2dSMordechay Goodstein if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) 290cd1ad2dSMordechay Goodstein return; 300cd1ad2dSMordechay Goodstein 310cd1ad2dSMordechay Goodstein filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + 320cd1ad2dSMordechay Goodstein num_tbs * sizeof(struct iwl_tfh_tb); 330cd1ad2dSMordechay Goodstein /* 340cd1ad2dSMordechay Goodstein * filled_tfd_size contains the number of filled bytes in the TFD. 
350cd1ad2dSMordechay Goodstein * Dividing it by 64 will give the number of chunks to fetch 360cd1ad2dSMordechay Goodstein * to SRAM- 0 for one chunk, 1 for 2 and so on. 370cd1ad2dSMordechay Goodstein * If, for example, TFD contains only 3 TBs then 32 bytes 380cd1ad2dSMordechay Goodstein * of the TFD are used, and only one chunk of 64 bytes should 390cd1ad2dSMordechay Goodstein * be fetched 400cd1ad2dSMordechay Goodstein */ 410cd1ad2dSMordechay Goodstein num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; 420cd1ad2dSMordechay Goodstein 430cd1ad2dSMordechay Goodstein if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 44d5399f11SMordechay Goodstein struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; 450cd1ad2dSMordechay Goodstein 460cd1ad2dSMordechay Goodstein /* Starting from AX210, the HW expects bytes */ 470cd1ad2dSMordechay Goodstein WARN_ON(trans->txqs.bc_table_dword); 480cd1ad2dSMordechay Goodstein WARN_ON(len > 0x3FFF); 490cd1ad2dSMordechay Goodstein bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); 50d5399f11SMordechay Goodstein scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; 510cd1ad2dSMordechay Goodstein } else { 520cd1ad2dSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; 530cd1ad2dSMordechay Goodstein 540cd1ad2dSMordechay Goodstein /* Before AX210, the HW expects DW */ 550cd1ad2dSMordechay Goodstein WARN_ON(!trans->txqs.bc_table_dword); 560cd1ad2dSMordechay Goodstein len = DIV_ROUND_UP(len, 4); 570cd1ad2dSMordechay Goodstein WARN_ON(len > 0xFFF); 580cd1ad2dSMordechay Goodstein bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); 590cd1ad2dSMordechay Goodstein scd_bc_tbl->tfd_offset[idx] = bc_ent; 600cd1ad2dSMordechay Goodstein } 610cd1ad2dSMordechay Goodstein } 620cd1ad2dSMordechay Goodstein 630cd1ad2dSMordechay Goodstein /* 640cd1ad2dSMordechay Goodstein * iwl_txq_inc_wr_ptr - Send new write index to hardware 650cd1ad2dSMordechay Goodstein */ 660cd1ad2dSMordechay Goodstein void 
iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) 670cd1ad2dSMordechay Goodstein { 680cd1ad2dSMordechay Goodstein lockdep_assert_held(&txq->lock); 690cd1ad2dSMordechay Goodstein 700cd1ad2dSMordechay Goodstein IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); 710cd1ad2dSMordechay Goodstein 720cd1ad2dSMordechay Goodstein /* 730cd1ad2dSMordechay Goodstein * if not in power-save mode, uCode will never sleep when we're 740cd1ad2dSMordechay Goodstein * trying to tx (during RFKILL, we're not trying to tx). 750cd1ad2dSMordechay Goodstein */ 760cd1ad2dSMordechay Goodstein iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); 770cd1ad2dSMordechay Goodstein } 780cd1ad2dSMordechay Goodstein 790cd1ad2dSMordechay Goodstein static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, 800cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd) 810cd1ad2dSMordechay Goodstein { 820cd1ad2dSMordechay Goodstein return le16_to_cpu(tfd->num_tbs) & 0x1f; 830cd1ad2dSMordechay Goodstein } 840cd1ad2dSMordechay Goodstein 850cd1ad2dSMordechay Goodstein void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, 860cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd) 870cd1ad2dSMordechay Goodstein { 880cd1ad2dSMordechay Goodstein int i, num_tbs; 890cd1ad2dSMordechay Goodstein 900cd1ad2dSMordechay Goodstein /* Sanity check on number of chunks */ 910cd1ad2dSMordechay Goodstein num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); 920cd1ad2dSMordechay Goodstein 930cd1ad2dSMordechay Goodstein if (num_tbs > trans->txqs.tfd.max_tbs) { 940cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 950cd1ad2dSMordechay Goodstein return; 960cd1ad2dSMordechay Goodstein } 970cd1ad2dSMordechay Goodstein 980cd1ad2dSMordechay Goodstein /* first TB is never freed - it's the bidirectional DMA data */ 990cd1ad2dSMordechay Goodstein for (i = 1; i < num_tbs; i++) { 1000cd1ad2dSMordechay Goodstein if (meta->tbs & BIT(i)) 
1010cd1ad2dSMordechay Goodstein dma_unmap_page(trans->dev, 1020cd1ad2dSMordechay Goodstein le64_to_cpu(tfd->tbs[i].addr), 1030cd1ad2dSMordechay Goodstein le16_to_cpu(tfd->tbs[i].tb_len), 1040cd1ad2dSMordechay Goodstein DMA_TO_DEVICE); 1050cd1ad2dSMordechay Goodstein else 1060cd1ad2dSMordechay Goodstein dma_unmap_single(trans->dev, 1070cd1ad2dSMordechay Goodstein le64_to_cpu(tfd->tbs[i].addr), 1080cd1ad2dSMordechay Goodstein le16_to_cpu(tfd->tbs[i].tb_len), 1090cd1ad2dSMordechay Goodstein DMA_TO_DEVICE); 1100cd1ad2dSMordechay Goodstein } 1110cd1ad2dSMordechay Goodstein 1120cd1ad2dSMordechay Goodstein tfd->num_tbs = 0; 1130cd1ad2dSMordechay Goodstein } 1140cd1ad2dSMordechay Goodstein 1150cd1ad2dSMordechay Goodstein void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 1160cd1ad2dSMordechay Goodstein { 1170cd1ad2dSMordechay Goodstein /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 1180cd1ad2dSMordechay Goodstein * idx is bounded by n_window 1190cd1ad2dSMordechay Goodstein */ 1200cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); 1210f8d5656SEmmanuel Grumbach struct sk_buff *skb; 1220cd1ad2dSMordechay Goodstein 1230cd1ad2dSMordechay Goodstein lockdep_assert_held(&txq->lock); 1240cd1ad2dSMordechay Goodstein 1250f8d5656SEmmanuel Grumbach if (!txq->entries) 1260f8d5656SEmmanuel Grumbach return; 1270f8d5656SEmmanuel Grumbach 1280cd1ad2dSMordechay Goodstein iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 1290cd1ad2dSMordechay Goodstein iwl_txq_get_tfd(trans, txq, idx)); 1300cd1ad2dSMordechay Goodstein 1310cd1ad2dSMordechay Goodstein skb = txq->entries[idx].skb; 1320cd1ad2dSMordechay Goodstein 1330cd1ad2dSMordechay Goodstein /* Can be called from irqs-disabled context 1340cd1ad2dSMordechay Goodstein * If skb is not NULL, it means that the whole queue is being 1350cd1ad2dSMordechay Goodstein * freed and that the queue is not empty - free the skb 1360cd1ad2dSMordechay Goodstein */ 1370cd1ad2dSMordechay Goodstein if (skb) { 
1380cd1ad2dSMordechay Goodstein iwl_op_mode_free_skb(trans->op_mode, skb); 1390cd1ad2dSMordechay Goodstein txq->entries[idx].skb = NULL; 1400cd1ad2dSMordechay Goodstein } 1410cd1ad2dSMordechay Goodstein } 1420cd1ad2dSMordechay Goodstein 1430cd1ad2dSMordechay Goodstein int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, 1440cd1ad2dSMordechay Goodstein dma_addr_t addr, u16 len) 1450cd1ad2dSMordechay Goodstein { 1460cd1ad2dSMordechay Goodstein int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); 1470cd1ad2dSMordechay Goodstein struct iwl_tfh_tb *tb; 1480cd1ad2dSMordechay Goodstein 1490cd1ad2dSMordechay Goodstein /* 1500cd1ad2dSMordechay Goodstein * Only WARN here so we know about the issue, but we mess up our 1510cd1ad2dSMordechay Goodstein * unmap path because not every place currently checks for errors 1520cd1ad2dSMordechay Goodstein * returned from this function - it can only return an error if 1530cd1ad2dSMordechay Goodstein * there's no more space, and so when we know there is enough we 1540cd1ad2dSMordechay Goodstein * don't always check ... 
1550cd1ad2dSMordechay Goodstein */ 1560cd1ad2dSMordechay Goodstein WARN(iwl_txq_crosses_4g_boundary(addr, len), 1570cd1ad2dSMordechay Goodstein "possible DMA problem with iova:0x%llx, len:%d\n", 1580cd1ad2dSMordechay Goodstein (unsigned long long)addr, len); 1590cd1ad2dSMordechay Goodstein 1600cd1ad2dSMordechay Goodstein if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) 1610cd1ad2dSMordechay Goodstein return -EINVAL; 1620cd1ad2dSMordechay Goodstein tb = &tfd->tbs[idx]; 1630cd1ad2dSMordechay Goodstein 1640cd1ad2dSMordechay Goodstein /* Each TFD can point to a maximum max_tbs Tx buffers */ 1650cd1ad2dSMordechay Goodstein if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { 1660cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Error can not send more than %d chunks\n", 1670cd1ad2dSMordechay Goodstein trans->txqs.tfd.max_tbs); 1680cd1ad2dSMordechay Goodstein return -EINVAL; 1690cd1ad2dSMordechay Goodstein } 1700cd1ad2dSMordechay Goodstein 1710cd1ad2dSMordechay Goodstein put_unaligned_le64(addr, &tb->addr); 1720cd1ad2dSMordechay Goodstein tb->tb_len = cpu_to_le16(len); 1730cd1ad2dSMordechay Goodstein 1740cd1ad2dSMordechay Goodstein tfd->num_tbs = cpu_to_le16(idx + 1); 1750cd1ad2dSMordechay Goodstein 1760cd1ad2dSMordechay Goodstein return idx; 1770cd1ad2dSMordechay Goodstein } 1780cd1ad2dSMordechay Goodstein 1790cd1ad2dSMordechay Goodstein static struct page *get_workaround_page(struct iwl_trans *trans, 1800cd1ad2dSMordechay Goodstein struct sk_buff *skb) 1810cd1ad2dSMordechay Goodstein { 1820cd1ad2dSMordechay Goodstein struct page **page_ptr; 1830cd1ad2dSMordechay Goodstein struct page *ret; 1840cd1ad2dSMordechay Goodstein 1850cd1ad2dSMordechay Goodstein page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 1860cd1ad2dSMordechay Goodstein 1870cd1ad2dSMordechay Goodstein ret = alloc_page(GFP_ATOMIC); 1880cd1ad2dSMordechay Goodstein if (!ret) 1890cd1ad2dSMordechay Goodstein return NULL; 1900cd1ad2dSMordechay Goodstein 1910cd1ad2dSMordechay Goodstein /* set the chaining pointer 
to the previous page if there */ 1923827cb59SJohannes Berg *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; 1930cd1ad2dSMordechay Goodstein *page_ptr = ret; 1940cd1ad2dSMordechay Goodstein 1950cd1ad2dSMordechay Goodstein return ret; 1960cd1ad2dSMordechay Goodstein } 1970cd1ad2dSMordechay Goodstein 1980cd1ad2dSMordechay Goodstein /* 1990cd1ad2dSMordechay Goodstein * Add a TB and if needed apply the FH HW bug workaround; 2000cd1ad2dSMordechay Goodstein * meta != NULL indicates that it's a page mapping and we 2010cd1ad2dSMordechay Goodstein * need to dma_unmap_page() and set the meta->tbs bit in 2020cd1ad2dSMordechay Goodstein * this case. 2030cd1ad2dSMordechay Goodstein */ 2040cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, 2050cd1ad2dSMordechay Goodstein struct sk_buff *skb, 2060cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, 2070cd1ad2dSMordechay Goodstein dma_addr_t phys, void *virt, 2080cd1ad2dSMordechay Goodstein u16 len, struct iwl_cmd_meta *meta) 2090cd1ad2dSMordechay Goodstein { 2100cd1ad2dSMordechay Goodstein dma_addr_t oldphys = phys; 2110cd1ad2dSMordechay Goodstein struct page *page; 2120cd1ad2dSMordechay Goodstein int ret; 2130cd1ad2dSMordechay Goodstein 2140cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, phys))) 2150cd1ad2dSMordechay Goodstein return -ENOMEM; 2160cd1ad2dSMordechay Goodstein 2170cd1ad2dSMordechay Goodstein if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { 2180cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 2190cd1ad2dSMordechay Goodstein 2200cd1ad2dSMordechay Goodstein if (ret < 0) 2210cd1ad2dSMordechay Goodstein goto unmap; 2220cd1ad2dSMordechay Goodstein 2230cd1ad2dSMordechay Goodstein if (meta) 2240cd1ad2dSMordechay Goodstein meta->tbs |= BIT(ret); 2250cd1ad2dSMordechay Goodstein 2260cd1ad2dSMordechay Goodstein ret = 0; 2270cd1ad2dSMordechay Goodstein goto trace; 2280cd1ad2dSMordechay Goodstein } 
2290cd1ad2dSMordechay Goodstein 2300cd1ad2dSMordechay Goodstein /* 2310cd1ad2dSMordechay Goodstein * Work around a hardware bug. If (as expressed in the 2320cd1ad2dSMordechay Goodstein * condition above) the TB ends on a 32-bit boundary, 2330cd1ad2dSMordechay Goodstein * then the next TB may be accessed with the wrong 2340cd1ad2dSMordechay Goodstein * address. 2350cd1ad2dSMordechay Goodstein * To work around it, copy the data elsewhere and make 2360cd1ad2dSMordechay Goodstein * a new mapping for it so the device will not fail. 2370cd1ad2dSMordechay Goodstein */ 2380cd1ad2dSMordechay Goodstein 2390cd1ad2dSMordechay Goodstein if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { 2400cd1ad2dSMordechay Goodstein ret = -ENOBUFS; 2410cd1ad2dSMordechay Goodstein goto unmap; 2420cd1ad2dSMordechay Goodstein } 2430cd1ad2dSMordechay Goodstein 2440cd1ad2dSMordechay Goodstein page = get_workaround_page(trans, skb); 2450cd1ad2dSMordechay Goodstein if (!page) { 2460cd1ad2dSMordechay Goodstein ret = -ENOMEM; 2470cd1ad2dSMordechay Goodstein goto unmap; 2480cd1ad2dSMordechay Goodstein } 2490cd1ad2dSMordechay Goodstein 2500cd1ad2dSMordechay Goodstein memcpy(page_address(page), virt, len); 2510cd1ad2dSMordechay Goodstein 2520cd1ad2dSMordechay Goodstein phys = dma_map_single(trans->dev, page_address(page), len, 2530cd1ad2dSMordechay Goodstein DMA_TO_DEVICE); 2540cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, phys))) 2550cd1ad2dSMordechay Goodstein return -ENOMEM; 2560cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 2570cd1ad2dSMordechay Goodstein if (ret < 0) { 2580cd1ad2dSMordechay Goodstein /* unmap the new allocation as single */ 2590cd1ad2dSMordechay Goodstein oldphys = phys; 2600cd1ad2dSMordechay Goodstein meta = NULL; 2610cd1ad2dSMordechay Goodstein goto unmap; 2620cd1ad2dSMordechay Goodstein } 2630cd1ad2dSMordechay Goodstein IWL_WARN(trans, 2640cd1ad2dSMordechay Goodstein "TB bug workaround: copied %d bytes from 0x%llx to 
0x%llx\n", 2650cd1ad2dSMordechay Goodstein len, (unsigned long long)oldphys, (unsigned long long)phys); 2660cd1ad2dSMordechay Goodstein 2670cd1ad2dSMordechay Goodstein ret = 0; 2680cd1ad2dSMordechay Goodstein unmap: 2690cd1ad2dSMordechay Goodstein if (meta) 2700cd1ad2dSMordechay Goodstein dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); 2710cd1ad2dSMordechay Goodstein else 2720cd1ad2dSMordechay Goodstein dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); 2730cd1ad2dSMordechay Goodstein trace: 2740cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); 2750cd1ad2dSMordechay Goodstein 2760cd1ad2dSMordechay Goodstein return ret; 2770cd1ad2dSMordechay Goodstein } 2780cd1ad2dSMordechay Goodstein 2790cd1ad2dSMordechay Goodstein #ifdef CONFIG_INET 2800cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, 2810cd1ad2dSMordechay Goodstein struct sk_buff *skb) 2820cd1ad2dSMordechay Goodstein { 2830cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); 2840cd1ad2dSMordechay Goodstein struct page **page_ptr; 2850cd1ad2dSMordechay Goodstein 2860cd1ad2dSMordechay Goodstein page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 2870cd1ad2dSMordechay Goodstein 2880cd1ad2dSMordechay Goodstein if (WARN_ON(*page_ptr)) 2890cd1ad2dSMordechay Goodstein return NULL; 2900cd1ad2dSMordechay Goodstein 2910cd1ad2dSMordechay Goodstein if (!p->page) 2920cd1ad2dSMordechay Goodstein goto alloc; 2930cd1ad2dSMordechay Goodstein 2940cd1ad2dSMordechay Goodstein /* 2950cd1ad2dSMordechay Goodstein * Check if there's enough room on this page 2960cd1ad2dSMordechay Goodstein * 2970cd1ad2dSMordechay Goodstein * Note that we put a page chaining pointer *last* in the 2980cd1ad2dSMordechay Goodstein * page - we need it somewhere, and if it's there then we 2990cd1ad2dSMordechay Goodstein * avoid DMA mapping the last bits of the page which may 3000cd1ad2dSMordechay 
Goodstein * trigger the 32-bit boundary hardware bug. 3010cd1ad2dSMordechay Goodstein * 3020cd1ad2dSMordechay Goodstein * (see also get_workaround_page() in tx-gen2.c) 3030cd1ad2dSMordechay Goodstein */ 3040cd1ad2dSMordechay Goodstein if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - 3050cd1ad2dSMordechay Goodstein sizeof(void *)) 3060cd1ad2dSMordechay Goodstein goto out; 3070cd1ad2dSMordechay Goodstein 3080cd1ad2dSMordechay Goodstein /* We don't have enough room on this page, get a new one. */ 3090cd1ad2dSMordechay Goodstein __free_page(p->page); 3100cd1ad2dSMordechay Goodstein 3110cd1ad2dSMordechay Goodstein alloc: 3120cd1ad2dSMordechay Goodstein p->page = alloc_page(GFP_ATOMIC); 3130cd1ad2dSMordechay Goodstein if (!p->page) 3140cd1ad2dSMordechay Goodstein return NULL; 3150cd1ad2dSMordechay Goodstein p->pos = page_address(p->page); 3160cd1ad2dSMordechay Goodstein /* set the chaining pointer to NULL */ 3173827cb59SJohannes Berg *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; 3180cd1ad2dSMordechay Goodstein out: 3190cd1ad2dSMordechay Goodstein *page_ptr = p->page; 3200cd1ad2dSMordechay Goodstein get_page(p->page); 3210cd1ad2dSMordechay Goodstein return p; 3220cd1ad2dSMordechay Goodstein } 3230cd1ad2dSMordechay Goodstein #endif 3240cd1ad2dSMordechay Goodstein 3250cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, 3260cd1ad2dSMordechay Goodstein struct sk_buff *skb, 3270cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, int start_len, 3280cd1ad2dSMordechay Goodstein u8 hdr_len, 3290cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd) 3300cd1ad2dSMordechay Goodstein { 3310cd1ad2dSMordechay Goodstein #ifdef CONFIG_INET 3320cd1ad2dSMordechay Goodstein struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; 3330cd1ad2dSMordechay Goodstein struct ieee80211_hdr *hdr = (void *)skb->data; 3340cd1ad2dSMordechay Goodstein unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 
3350cd1ad2dSMordechay Goodstein unsigned int mss = skb_shinfo(skb)->gso_size; 3360cd1ad2dSMordechay Goodstein u16 length, amsdu_pad; 3370cd1ad2dSMordechay Goodstein u8 *start_hdr; 3380cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *hdr_page; 3390cd1ad2dSMordechay Goodstein struct tso_t tso; 3400cd1ad2dSMordechay Goodstein 3410cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), 3420cd1ad2dSMordechay Goodstein &dev_cmd->hdr, start_len, 0); 3430cd1ad2dSMordechay Goodstein 3440cd1ad2dSMordechay Goodstein ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 3450cd1ad2dSMordechay Goodstein snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 3460cd1ad2dSMordechay Goodstein total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; 3470cd1ad2dSMordechay Goodstein amsdu_pad = 0; 3480cd1ad2dSMordechay Goodstein 3490cd1ad2dSMordechay Goodstein /* total amount of header we may need for this A-MSDU */ 3500cd1ad2dSMordechay Goodstein hdr_room = DIV_ROUND_UP(total_len, mss) * 3510cd1ad2dSMordechay Goodstein (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); 3520cd1ad2dSMordechay Goodstein 3530cd1ad2dSMordechay Goodstein /* Our device supports 9 segments at most, it will fit in 1 page */ 3540cd1ad2dSMordechay Goodstein hdr_page = get_page_hdr(trans, hdr_room, skb); 3550cd1ad2dSMordechay Goodstein if (!hdr_page) 3560cd1ad2dSMordechay Goodstein return -ENOMEM; 3570cd1ad2dSMordechay Goodstein 3580cd1ad2dSMordechay Goodstein start_hdr = hdr_page->pos; 3590cd1ad2dSMordechay Goodstein 3600cd1ad2dSMordechay Goodstein /* 3610cd1ad2dSMordechay Goodstein * Pull the ieee80211 header to be able to use TSO core, 3620cd1ad2dSMordechay Goodstein * we will restore it for the tx_status flow. 
3630cd1ad2dSMordechay Goodstein */ 3640cd1ad2dSMordechay Goodstein skb_pull(skb, hdr_len); 3650cd1ad2dSMordechay Goodstein 3660cd1ad2dSMordechay Goodstein /* 3670cd1ad2dSMordechay Goodstein * Remove the length of all the headers that we don't actually 3680cd1ad2dSMordechay Goodstein * have in the MPDU by themselves, but that we duplicate into 3690cd1ad2dSMordechay Goodstein * all the different MSDUs inside the A-MSDU. 3700cd1ad2dSMordechay Goodstein */ 3710cd1ad2dSMordechay Goodstein le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 3720cd1ad2dSMordechay Goodstein 3730cd1ad2dSMordechay Goodstein tso_start(skb, &tso); 3740cd1ad2dSMordechay Goodstein 3750cd1ad2dSMordechay Goodstein while (total_len) { 3760cd1ad2dSMordechay Goodstein /* this is the data left for this subframe */ 3770cd1ad2dSMordechay Goodstein unsigned int data_left = min_t(unsigned int, mss, total_len); 3780cd1ad2dSMordechay Goodstein unsigned int tb_len; 3790cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 3800cd1ad2dSMordechay Goodstein u8 *subf_hdrs_start = hdr_page->pos; 3810cd1ad2dSMordechay Goodstein 3820cd1ad2dSMordechay Goodstein total_len -= data_left; 3830cd1ad2dSMordechay Goodstein 3840cd1ad2dSMordechay Goodstein memset(hdr_page->pos, 0, amsdu_pad); 3850cd1ad2dSMordechay Goodstein hdr_page->pos += amsdu_pad; 3860cd1ad2dSMordechay Goodstein amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 3870cd1ad2dSMordechay Goodstein data_left)) & 0x3; 3880cd1ad2dSMordechay Goodstein ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 3890cd1ad2dSMordechay Goodstein hdr_page->pos += ETH_ALEN; 3900cd1ad2dSMordechay Goodstein ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 3910cd1ad2dSMordechay Goodstein hdr_page->pos += ETH_ALEN; 3920cd1ad2dSMordechay Goodstein 3930cd1ad2dSMordechay Goodstein length = snap_ip_tcp_hdrlen + data_left; 3940cd1ad2dSMordechay Goodstein *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 3950cd1ad2dSMordechay Goodstein hdr_page->pos += sizeof(length); 
3960cd1ad2dSMordechay Goodstein 3970cd1ad2dSMordechay Goodstein /* 3980cd1ad2dSMordechay Goodstein * This will copy the SNAP as well which will be considered 3990cd1ad2dSMordechay Goodstein * as MAC header. 4000cd1ad2dSMordechay Goodstein */ 4010cd1ad2dSMordechay Goodstein tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 4020cd1ad2dSMordechay Goodstein 4030cd1ad2dSMordechay Goodstein hdr_page->pos += snap_ip_tcp_hdrlen; 4040cd1ad2dSMordechay Goodstein 4050cd1ad2dSMordechay Goodstein tb_len = hdr_page->pos - start_hdr; 4060cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, start_hdr, 4070cd1ad2dSMordechay Goodstein tb_len, DMA_TO_DEVICE); 408fb54b863SJohannes Berg if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 4090cd1ad2dSMordechay Goodstein goto out_err; 4100cd1ad2dSMordechay Goodstein /* 4110cd1ad2dSMordechay Goodstein * No need for _with_wa, this is from the TSO page and 4120cd1ad2dSMordechay Goodstein * we leave some space at the end of it so can't hit 4130cd1ad2dSMordechay Goodstein * the buggy scenario. 
4140cd1ad2dSMordechay Goodstein */ 4150cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); 4160cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 4170cd1ad2dSMordechay Goodstein tb_phys, tb_len); 4180cd1ad2dSMordechay Goodstein /* add this subframe's headers' length to the tx_cmd */ 4190cd1ad2dSMordechay Goodstein le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 4200cd1ad2dSMordechay Goodstein 4210cd1ad2dSMordechay Goodstein /* prepare the start_hdr for the next subframe */ 4220cd1ad2dSMordechay Goodstein start_hdr = hdr_page->pos; 4230cd1ad2dSMordechay Goodstein 4240cd1ad2dSMordechay Goodstein /* put the payload */ 4250cd1ad2dSMordechay Goodstein while (data_left) { 4260cd1ad2dSMordechay Goodstein int ret; 4270cd1ad2dSMordechay Goodstein 4280cd1ad2dSMordechay Goodstein tb_len = min_t(unsigned int, tso.size, data_left); 4290cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, tso.data, 4300cd1ad2dSMordechay Goodstein tb_len, DMA_TO_DEVICE); 4310cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, 4320cd1ad2dSMordechay Goodstein tb_phys, tso.data, 4330cd1ad2dSMordechay Goodstein tb_len, NULL); 434fb54b863SJohannes Berg if (ret) 4350cd1ad2dSMordechay Goodstein goto out_err; 4360cd1ad2dSMordechay Goodstein 4370cd1ad2dSMordechay Goodstein data_left -= tb_len; 4380cd1ad2dSMordechay Goodstein tso_build_data(skb, &tso, tb_len); 4390cd1ad2dSMordechay Goodstein } 4400cd1ad2dSMordechay Goodstein } 4410cd1ad2dSMordechay Goodstein 4420cd1ad2dSMordechay Goodstein /* re -add the WiFi header */ 4430cd1ad2dSMordechay Goodstein skb_push(skb, hdr_len); 4440cd1ad2dSMordechay Goodstein 4450cd1ad2dSMordechay Goodstein return 0; 4460cd1ad2dSMordechay Goodstein 4470cd1ad2dSMordechay Goodstein out_err: 4480cd1ad2dSMordechay Goodstein #endif 4490cd1ad2dSMordechay Goodstein return -EINVAL; 4500cd1ad2dSMordechay Goodstein } 4510cd1ad2dSMordechay Goodstein 4520cd1ad2dSMordechay Goodstein 
static struct 4530cd1ad2dSMordechay Goodstein iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, 4540cd1ad2dSMordechay Goodstein struct iwl_txq *txq, 4550cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, 4560cd1ad2dSMordechay Goodstein struct sk_buff *skb, 4570cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta, 4580cd1ad2dSMordechay Goodstein int hdr_len, 4590cd1ad2dSMordechay Goodstein int tx_cmd_len) 4600cd1ad2dSMordechay Goodstein { 4610cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 4620cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 4630cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 4640cd1ad2dSMordechay Goodstein int len; 4650cd1ad2dSMordechay Goodstein void *tb1_addr; 4660cd1ad2dSMordechay Goodstein 4670cd1ad2dSMordechay Goodstein tb_phys = iwl_txq_get_first_tb_dma(txq, idx); 4680cd1ad2dSMordechay Goodstein 4690cd1ad2dSMordechay Goodstein /* 4700cd1ad2dSMordechay Goodstein * No need for _with_wa, the first TB allocation is aligned up 4710cd1ad2dSMordechay Goodstein * to a 64-byte boundary and thus can't be at the end or cross 4720cd1ad2dSMordechay Goodstein * a page boundary (much less a 2^32 boundary). 
4730cd1ad2dSMordechay Goodstein */ 4740cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); 4750cd1ad2dSMordechay Goodstein 4760cd1ad2dSMordechay Goodstein /* 4770cd1ad2dSMordechay Goodstein * The second TB (tb1) points to the remainder of the TX command 4780cd1ad2dSMordechay Goodstein * and the 802.11 header - dword aligned size 4790cd1ad2dSMordechay Goodstein * (This calculation modifies the TX command, so do it before the 4800cd1ad2dSMordechay Goodstein * setup of the first TB) 4810cd1ad2dSMordechay Goodstein */ 4820cd1ad2dSMordechay Goodstein len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - 4830cd1ad2dSMordechay Goodstein IWL_FIRST_TB_SIZE; 4840cd1ad2dSMordechay Goodstein 4850cd1ad2dSMordechay Goodstein /* do not align A-MSDU to dword as the subframe header aligns it */ 4860cd1ad2dSMordechay Goodstein 4870cd1ad2dSMordechay Goodstein /* map the data for TB1 */ 4880cd1ad2dSMordechay Goodstein tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 4890cd1ad2dSMordechay Goodstein tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); 4900cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 4910cd1ad2dSMordechay Goodstein goto out_err; 4920cd1ad2dSMordechay Goodstein /* 4930cd1ad2dSMordechay Goodstein * No need for _with_wa(), we ensure (via alignment) that the data 4940cd1ad2dSMordechay Goodstein * here can never cross or end at a page boundary. 
4950cd1ad2dSMordechay Goodstein */ 4960cd1ad2dSMordechay Goodstein iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); 4970cd1ad2dSMordechay Goodstein 4980cd1ad2dSMordechay Goodstein if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, 4990cd1ad2dSMordechay Goodstein hdr_len, dev_cmd)) 5000cd1ad2dSMordechay Goodstein goto out_err; 5010cd1ad2dSMordechay Goodstein 5020cd1ad2dSMordechay Goodstein /* building the A-MSDU might have changed this data, memcpy it now */ 5030cd1ad2dSMordechay Goodstein memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); 5040cd1ad2dSMordechay Goodstein return tfd; 5050cd1ad2dSMordechay Goodstein 5060cd1ad2dSMordechay Goodstein out_err: 5070cd1ad2dSMordechay Goodstein iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); 5080cd1ad2dSMordechay Goodstein return NULL; 5090cd1ad2dSMordechay Goodstein } 5100cd1ad2dSMordechay Goodstein 5110cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, 5120cd1ad2dSMordechay Goodstein struct sk_buff *skb, 5130cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, 5140cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta) 5150cd1ad2dSMordechay Goodstein { 5160cd1ad2dSMordechay Goodstein int i; 5170cd1ad2dSMordechay Goodstein 5180cd1ad2dSMordechay Goodstein for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5190cd1ad2dSMordechay Goodstein const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5200cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 5210cd1ad2dSMordechay Goodstein unsigned int fragsz = skb_frag_size(frag); 5220cd1ad2dSMordechay Goodstein int ret; 5230cd1ad2dSMordechay Goodstein 5240cd1ad2dSMordechay Goodstein if (!fragsz) 5250cd1ad2dSMordechay Goodstein continue; 5260cd1ad2dSMordechay Goodstein 5270cd1ad2dSMordechay Goodstein tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 5280cd1ad2dSMordechay Goodstein fragsz, DMA_TO_DEVICE); 5290cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, 5300cd1ad2dSMordechay Goodstein 
skb_frag_address(frag), 5310cd1ad2dSMordechay Goodstein fragsz, out_meta); 5320cd1ad2dSMordechay Goodstein if (ret) 5330cd1ad2dSMordechay Goodstein return ret; 5340cd1ad2dSMordechay Goodstein } 5350cd1ad2dSMordechay Goodstein 5360cd1ad2dSMordechay Goodstein return 0; 5370cd1ad2dSMordechay Goodstein } 5380cd1ad2dSMordechay Goodstein 5390cd1ad2dSMordechay Goodstein static struct 5400cd1ad2dSMordechay Goodstein iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, 5410cd1ad2dSMordechay Goodstein struct iwl_txq *txq, 5420cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, 5430cd1ad2dSMordechay Goodstein struct sk_buff *skb, 5440cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta, 5450cd1ad2dSMordechay Goodstein int hdr_len, 5460cd1ad2dSMordechay Goodstein int tx_cmd_len, 5470cd1ad2dSMordechay Goodstein bool pad) 5480cd1ad2dSMordechay Goodstein { 5490cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 5500cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 5510cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 5520cd1ad2dSMordechay Goodstein int len, tb1_len, tb2_len; 5530cd1ad2dSMordechay Goodstein void *tb1_addr; 5540cd1ad2dSMordechay Goodstein struct sk_buff *frag; 5550cd1ad2dSMordechay Goodstein 5560cd1ad2dSMordechay Goodstein tb_phys = iwl_txq_get_first_tb_dma(txq, idx); 5570cd1ad2dSMordechay Goodstein 5580cd1ad2dSMordechay Goodstein /* The first TB points to bi-directional DMA data */ 5590cd1ad2dSMordechay Goodstein memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); 5600cd1ad2dSMordechay Goodstein 5610cd1ad2dSMordechay Goodstein /* 5620cd1ad2dSMordechay Goodstein * No need for _with_wa, the first TB allocation is aligned up 5630cd1ad2dSMordechay Goodstein * to a 64-byte boundary and thus can't be at the end or cross 5640cd1ad2dSMordechay Goodstein * a page boundary (much less a 2^32 boundary). 
/*
 * iwl_txq_gen2_build_tx - build the TFD for a non-A-MSDU frame
 *
 * TB layout built here:
 *  TB0 - first IWL_FIRST_TB_SIZE bytes of the device command (from the
 *        per-queue first_tb_bufs bounce buffer)
 *  TB1 - remainder of the TX command plus the 802.11 header
 *  TB2 - remainder of the skb linear head (if any)
 *  further TBs - page fragments and frag-list skbs
 *
 * @pad: if true, TB1 length is rounded up to a dword boundary.
 *
 * Returns the filled TFD, or NULL on DMA-mapping failure (in which case
 * all TBs mapped so far are unmapped again via iwl_txq_gen2_tfd_unmap()).
 */
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		/*
		 * Mapping errors are caught inside _set_tb_with_wa(), which
		 * is why tb_phys is not checked here.
		 */
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	/* page fragments of the main skb */
	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	/* frag-list skbs: linear head first, then their page fragments */
	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	/* undo all TB mappings accumulated in the TFD so far */
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
Goodstein IEEE80211_QOS_CTL_A_MSDU_PRESENT); 6600cd1ad2dSMordechay Goodstein 6610cd1ad2dSMordechay Goodstein hdr_len = ieee80211_hdrlen(hdr->frame_control); 6620cd1ad2dSMordechay Goodstein 6630cd1ad2dSMordechay Goodstein /* 6640cd1ad2dSMordechay Goodstein * Only build A-MSDUs here if doing so by GSO, otherwise it may be 6650cd1ad2dSMordechay Goodstein * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been 6660cd1ad2dSMordechay Goodstein * built in the higher layers already. 6670cd1ad2dSMordechay Goodstein */ 6680cd1ad2dSMordechay Goodstein if (amsdu && skb_shinfo(skb)->gso_size) 6690cd1ad2dSMordechay Goodstein return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, 6700cd1ad2dSMordechay Goodstein out_meta, hdr_len, len); 6710cd1ad2dSMordechay Goodstein return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, 6720cd1ad2dSMordechay Goodstein hdr_len, len, !amsdu); 6730cd1ad2dSMordechay Goodstein } 6740cd1ad2dSMordechay Goodstein 6750cd1ad2dSMordechay Goodstein int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) 6760cd1ad2dSMordechay Goodstein { 6770cd1ad2dSMordechay Goodstein unsigned int max; 6780cd1ad2dSMordechay Goodstein unsigned int used; 6790cd1ad2dSMordechay Goodstein 6800cd1ad2dSMordechay Goodstein /* 6810cd1ad2dSMordechay Goodstein * To avoid ambiguity between empty and completely full queues, there 6820cd1ad2dSMordechay Goodstein * should always be less than max_tfd_queue_size elements in the queue. 6830cd1ad2dSMordechay Goodstein * If q->n_window is smaller than max_tfd_queue_size, there is no need 6840cd1ad2dSMordechay Goodstein * to reserve any queue entries for this purpose. 
6850cd1ad2dSMordechay Goodstein */ 6860cd1ad2dSMordechay Goodstein if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) 6870cd1ad2dSMordechay Goodstein max = q->n_window; 6880cd1ad2dSMordechay Goodstein else 6890cd1ad2dSMordechay Goodstein max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; 6900cd1ad2dSMordechay Goodstein 6910cd1ad2dSMordechay Goodstein /* 6920cd1ad2dSMordechay Goodstein * max_tfd_queue_size is a power of 2, so the following is equivalent to 6930cd1ad2dSMordechay Goodstein * modulo by max_tfd_queue_size and is well defined. 6940cd1ad2dSMordechay Goodstein */ 6950cd1ad2dSMordechay Goodstein used = (q->write_ptr - q->read_ptr) & 6960cd1ad2dSMordechay Goodstein (trans->trans_cfg->base_params->max_tfd_queue_size - 1); 6970cd1ad2dSMordechay Goodstein 6980cd1ad2dSMordechay Goodstein if (WARN_ON(used > max)) 6990cd1ad2dSMordechay Goodstein return 0; 7000cd1ad2dSMordechay Goodstein 7010cd1ad2dSMordechay Goodstein return max - used; 7020cd1ad2dSMordechay Goodstein } 7030cd1ad2dSMordechay Goodstein 7040cd1ad2dSMordechay Goodstein int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, 7050cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, int txq_id) 7060cd1ad2dSMordechay Goodstein { 7070cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta; 7080cd1ad2dSMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[txq_id]; 7090cd1ad2dSMordechay Goodstein u16 cmd_len; 7100cd1ad2dSMordechay Goodstein int idx; 7110cd1ad2dSMordechay Goodstein void *tfd; 7120cd1ad2dSMordechay Goodstein 7130cd1ad2dSMordechay Goodstein if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, 7140cd1ad2dSMordechay Goodstein "queue %d out of range", txq_id)) 7150cd1ad2dSMordechay Goodstein return -EINVAL; 7160cd1ad2dSMordechay Goodstein 7170cd1ad2dSMordechay Goodstein if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), 7180cd1ad2dSMordechay Goodstein "TX on unused queue %d\n", txq_id)) 7190cd1ad2dSMordechay Goodstein return -EINVAL; 
/*
 * iwl_txq_gen2_tx - transmit @skb on TX queue @txq_id
 *
 * Validates the queue, linearizes an over-fragmented skb, builds the TFD,
 * programs the byte-count table and advances the write pointer.  If the
 * queue is nearly full the packet is parked on the overflow queue instead
 * (still returning 0 - the frame is accepted, not dropped).
 *
 * Returns 0 on success/queued, -EINVAL for a bad queue, -ENOMEM if
 * linearization fails, or -1 if TFD building fails.
 */
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	/* more fragments than the TFD can hold - fall back to a linear skb */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			/* stash the command in skb->cb for later replay */
			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* frame length lives at a family-dependent offset in the command */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}
8230cd1ad2dSMordechay Goodstein 8240cd1ad2dSMordechay Goodstein iwl_op_mode_free_skb(trans->op_mode, skb); 8250cd1ad2dSMordechay Goodstein } 8260cd1ad2dSMordechay Goodstein 8270cd1ad2dSMordechay Goodstein spin_unlock_bh(&txq->lock); 8280cd1ad2dSMordechay Goodstein 8290cd1ad2dSMordechay Goodstein /* just in case - this queue may have been stopped */ 8300cd1ad2dSMordechay Goodstein iwl_wake_queue(trans, txq); 8310cd1ad2dSMordechay Goodstein } 8320cd1ad2dSMordechay Goodstein 8330cd1ad2dSMordechay Goodstein static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, 8340cd1ad2dSMordechay Goodstein struct iwl_txq *txq) 8350cd1ad2dSMordechay Goodstein { 8360cd1ad2dSMordechay Goodstein struct device *dev = trans->dev; 8370cd1ad2dSMordechay Goodstein 8380cd1ad2dSMordechay Goodstein /* De-alloc circular buffer of TFDs */ 8390cd1ad2dSMordechay Goodstein if (txq->tfds) { 8400cd1ad2dSMordechay Goodstein dma_free_coherent(dev, 8410cd1ad2dSMordechay Goodstein trans->txqs.tfd.size * txq->n_window, 8420cd1ad2dSMordechay Goodstein txq->tfds, txq->dma_addr); 8430cd1ad2dSMordechay Goodstein dma_free_coherent(dev, 8440cd1ad2dSMordechay Goodstein sizeof(*txq->first_tb_bufs) * txq->n_window, 8450cd1ad2dSMordechay Goodstein txq->first_tb_bufs, txq->first_tb_dma); 8460cd1ad2dSMordechay Goodstein } 8470cd1ad2dSMordechay Goodstein 8480cd1ad2dSMordechay Goodstein kfree(txq->entries); 8490cd1ad2dSMordechay Goodstein if (txq->bc_tbl.addr) 8500cd1ad2dSMordechay Goodstein dma_pool_free(trans->txqs.bc_pool, 8510cd1ad2dSMordechay Goodstein txq->bc_tbl.addr, txq->bc_tbl.dma); 8520cd1ad2dSMordechay Goodstein kfree(txq); 8530cd1ad2dSMordechay Goodstein } 8540cd1ad2dSMordechay Goodstein 8550cd1ad2dSMordechay Goodstein /* 8560cd1ad2dSMordechay Goodstein * iwl_pcie_txq_free - Deallocate DMA queue. 8570cd1ad2dSMordechay Goodstein * @txq: Transmit queue to deallocate. 8580cd1ad2dSMordechay Goodstein * 8590cd1ad2dSMordechay Goodstein * Empty queue by removing and destroying all BD's. 
8600cd1ad2dSMordechay Goodstein * Free all buffers. 8610cd1ad2dSMordechay Goodstein * 0-fill, but do not free "txq" descriptor structure. 8620cd1ad2dSMordechay Goodstein */ 8630cd1ad2dSMordechay Goodstein static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) 8640cd1ad2dSMordechay Goodstein { 8650cd1ad2dSMordechay Goodstein struct iwl_txq *txq; 8660cd1ad2dSMordechay Goodstein int i; 8670cd1ad2dSMordechay Goodstein 8680cd1ad2dSMordechay Goodstein if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, 8690cd1ad2dSMordechay Goodstein "queue %d out of range", txq_id)) 8700cd1ad2dSMordechay Goodstein return; 8710cd1ad2dSMordechay Goodstein 8720cd1ad2dSMordechay Goodstein txq = trans->txqs.txq[txq_id]; 8730cd1ad2dSMordechay Goodstein 8740cd1ad2dSMordechay Goodstein if (WARN_ON(!txq)) 8750cd1ad2dSMordechay Goodstein return; 8760cd1ad2dSMordechay Goodstein 8770cd1ad2dSMordechay Goodstein iwl_txq_gen2_unmap(trans, txq_id); 8780cd1ad2dSMordechay Goodstein 8790cd1ad2dSMordechay Goodstein /* De-alloc array of command/tx buffers */ 8800cd1ad2dSMordechay Goodstein if (txq_id == trans->txqs.cmd.q_id) 8810cd1ad2dSMordechay Goodstein for (i = 0; i < txq->n_window; i++) { 8820cd1ad2dSMordechay Goodstein kfree_sensitive(txq->entries[i].cmd); 8830cd1ad2dSMordechay Goodstein kfree_sensitive(txq->entries[i].free_buf); 8840cd1ad2dSMordechay Goodstein } 8850cd1ad2dSMordechay Goodstein del_timer_sync(&txq->stuck_timer); 8860cd1ad2dSMordechay Goodstein 8870cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 8880cd1ad2dSMordechay Goodstein 8890cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id] = NULL; 8900cd1ad2dSMordechay Goodstein 8910cd1ad2dSMordechay Goodstein clear_bit(txq_id, trans->txqs.queue_used); 8920cd1ad2dSMordechay Goodstein } 8930cd1ad2dSMordechay Goodstein 8940cd1ad2dSMordechay Goodstein /* 8950cd1ad2dSMordechay Goodstein * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 8960cd1ad2dSMordechay Goodstein */ 8970cd1ad2dSMordechay 
Goodstein static int iwl_queue_init(struct iwl_txq *q, int slots_num) 8980cd1ad2dSMordechay Goodstein { 8990cd1ad2dSMordechay Goodstein q->n_window = slots_num; 9000cd1ad2dSMordechay Goodstein 9010cd1ad2dSMordechay Goodstein /* slots_num must be power-of-two size, otherwise 9020cd1ad2dSMordechay Goodstein * iwl_txq_get_cmd_index is broken. */ 9030cd1ad2dSMordechay Goodstein if (WARN_ON(!is_power_of_2(slots_num))) 9040cd1ad2dSMordechay Goodstein return -EINVAL; 9050cd1ad2dSMordechay Goodstein 9060cd1ad2dSMordechay Goodstein q->low_mark = q->n_window / 4; 9070cd1ad2dSMordechay Goodstein if (q->low_mark < 4) 9080cd1ad2dSMordechay Goodstein q->low_mark = 4; 9090cd1ad2dSMordechay Goodstein 9100cd1ad2dSMordechay Goodstein q->high_mark = q->n_window / 8; 9110cd1ad2dSMordechay Goodstein if (q->high_mark < 2) 9120cd1ad2dSMordechay Goodstein q->high_mark = 2; 9130cd1ad2dSMordechay Goodstein 9140cd1ad2dSMordechay Goodstein q->write_ptr = 0; 9150cd1ad2dSMordechay Goodstein q->read_ptr = 0; 9160cd1ad2dSMordechay Goodstein 9170cd1ad2dSMordechay Goodstein return 0; 9180cd1ad2dSMordechay Goodstein } 9190cd1ad2dSMordechay Goodstein 9200cd1ad2dSMordechay Goodstein int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, 9210cd1ad2dSMordechay Goodstein bool cmd_queue) 9220cd1ad2dSMordechay Goodstein { 9230cd1ad2dSMordechay Goodstein int ret; 9240cd1ad2dSMordechay Goodstein u32 tfd_queue_max_size = 9250cd1ad2dSMordechay Goodstein trans->trans_cfg->base_params->max_tfd_queue_size; 9260cd1ad2dSMordechay Goodstein 9270cd1ad2dSMordechay Goodstein txq->need_update = false; 9280cd1ad2dSMordechay Goodstein 9290cd1ad2dSMordechay Goodstein /* max_tfd_queue_size must be power-of-two size, otherwise 9300cd1ad2dSMordechay Goodstein * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. 
*/ 9310cd1ad2dSMordechay Goodstein if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), 9320cd1ad2dSMordechay Goodstein "Max tfd queue size must be a power of two, but is %d", 9330cd1ad2dSMordechay Goodstein tfd_queue_max_size)) 9340cd1ad2dSMordechay Goodstein return -EINVAL; 9350cd1ad2dSMordechay Goodstein 9360cd1ad2dSMordechay Goodstein /* Initialize queue's high/low-water marks, and head/tail indexes */ 9370cd1ad2dSMordechay Goodstein ret = iwl_queue_init(txq, slots_num); 9380cd1ad2dSMordechay Goodstein if (ret) 9390cd1ad2dSMordechay Goodstein return ret; 9400cd1ad2dSMordechay Goodstein 9410cd1ad2dSMordechay Goodstein spin_lock_init(&txq->lock); 9420cd1ad2dSMordechay Goodstein 9430cd1ad2dSMordechay Goodstein if (cmd_queue) { 9440cd1ad2dSMordechay Goodstein static struct lock_class_key iwl_txq_cmd_queue_lock_class; 9450cd1ad2dSMordechay Goodstein 9460cd1ad2dSMordechay Goodstein lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); 9470cd1ad2dSMordechay Goodstein } 9480cd1ad2dSMordechay Goodstein 9490cd1ad2dSMordechay Goodstein __skb_queue_head_init(&txq->overflow_q); 9500cd1ad2dSMordechay Goodstein 9510cd1ad2dSMordechay Goodstein return 0; 9520cd1ad2dSMordechay Goodstein } 9530cd1ad2dSMordechay Goodstein 9540cd1ad2dSMordechay Goodstein void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) 9550cd1ad2dSMordechay Goodstein { 9560cd1ad2dSMordechay Goodstein struct page **page_ptr; 9570cd1ad2dSMordechay Goodstein struct page *next; 9580cd1ad2dSMordechay Goodstein 9590cd1ad2dSMordechay Goodstein page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 9600cd1ad2dSMordechay Goodstein next = *page_ptr; 9610cd1ad2dSMordechay Goodstein *page_ptr = NULL; 9620cd1ad2dSMordechay Goodstein 9630cd1ad2dSMordechay Goodstein while (next) { 9640cd1ad2dSMordechay Goodstein struct page *tmp = next; 9650cd1ad2dSMordechay Goodstein 9663827cb59SJohannes Berg next = *(void **)((u8 *)page_address(next) + PAGE_SIZE - 9670cd1ad2dSMordechay Goodstein 
sizeof(void *)); 9680cd1ad2dSMordechay Goodstein __free_page(tmp); 9690cd1ad2dSMordechay Goodstein } 9700cd1ad2dSMordechay Goodstein } 9710cd1ad2dSMordechay Goodstein 9720cd1ad2dSMordechay Goodstein void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) 9730cd1ad2dSMordechay Goodstein { 9740cd1ad2dSMordechay Goodstein u32 txq_id = txq->id; 9750cd1ad2dSMordechay Goodstein u32 status; 9760cd1ad2dSMordechay Goodstein bool active; 9770cd1ad2dSMordechay Goodstein u8 fifo; 9780cd1ad2dSMordechay Goodstein 9790cd1ad2dSMordechay Goodstein if (trans->trans_cfg->use_tfh) { 9800cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, 9810cd1ad2dSMordechay Goodstein txq->read_ptr, txq->write_ptr); 9820cd1ad2dSMordechay Goodstein /* TODO: access new SCD registers and dump them */ 9830cd1ad2dSMordechay Goodstein return; 9840cd1ad2dSMordechay Goodstein } 9850cd1ad2dSMordechay Goodstein 9860cd1ad2dSMordechay Goodstein status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); 9870cd1ad2dSMordechay Goodstein fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 9880cd1ad2dSMordechay Goodstein active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 9890cd1ad2dSMordechay Goodstein 9900cd1ad2dSMordechay Goodstein IWL_ERR(trans, 9910cd1ad2dSMordechay Goodstein "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", 9920cd1ad2dSMordechay Goodstein txq_id, active ? 
"" : "in", fifo, 9930cd1ad2dSMordechay Goodstein jiffies_to_msecs(txq->wd_timeout), 9940cd1ad2dSMordechay Goodstein txq->read_ptr, txq->write_ptr, 9950cd1ad2dSMordechay Goodstein iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 9960cd1ad2dSMordechay Goodstein (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 9970cd1ad2dSMordechay Goodstein iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 9980cd1ad2dSMordechay Goodstein (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 9990cd1ad2dSMordechay Goodstein iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 10000cd1ad2dSMordechay Goodstein } 10010cd1ad2dSMordechay Goodstein 10020cd1ad2dSMordechay Goodstein static void iwl_txq_stuck_timer(struct timer_list *t) 10030cd1ad2dSMordechay Goodstein { 10040cd1ad2dSMordechay Goodstein struct iwl_txq *txq = from_timer(txq, t, stuck_timer); 10050cd1ad2dSMordechay Goodstein struct iwl_trans *trans = txq->trans; 10060cd1ad2dSMordechay Goodstein 10070cd1ad2dSMordechay Goodstein spin_lock(&txq->lock); 10080cd1ad2dSMordechay Goodstein /* check if triggered erroneously */ 10090cd1ad2dSMordechay Goodstein if (txq->read_ptr == txq->write_ptr) { 10100cd1ad2dSMordechay Goodstein spin_unlock(&txq->lock); 10110cd1ad2dSMordechay Goodstein return; 10120cd1ad2dSMordechay Goodstein } 10130cd1ad2dSMordechay Goodstein spin_unlock(&txq->lock); 10140cd1ad2dSMordechay Goodstein 10150cd1ad2dSMordechay Goodstein iwl_txq_log_scd_error(trans, txq); 10160cd1ad2dSMordechay Goodstein 10170cd1ad2dSMordechay Goodstein iwl_force_nmi(trans); 10180cd1ad2dSMordechay Goodstein } 10190cd1ad2dSMordechay Goodstein 10200cd1ad2dSMordechay Goodstein int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, 10210cd1ad2dSMordechay Goodstein bool cmd_queue) 10220cd1ad2dSMordechay Goodstein { 10230cd1ad2dSMordechay Goodstein size_t tfd_sz = trans->txqs.tfd.size * 10240cd1ad2dSMordechay Goodstein trans->trans_cfg->base_params->max_tfd_queue_size; 10250cd1ad2dSMordechay Goodstein size_t tb0_buf_sz; 
/*
 * iwl_txq_alloc - allocate all per-queue buffers for @txq
 *
 * Allocates the entries array (plus per-slot command buffers for the
 * command queue), the coherent TFD ring and the first-TB bounce buffers.
 * On failure everything allocated so far is released and -ENOMEM is
 * returned; the queue struct itself is owned by the caller.
 */
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue)
{
	size_t tfd_sz = trans->txqs.tfd.size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	/* TFH hardware only needs a ring as large as the window */
	if (trans->trans_cfg->use_tfh)
		tfd_sz = trans->txqs.tfd.size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	/* the command queue pre-allocates a command buffer per slot */
	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
	/* clear so a later free/alloc won't double-free or trip the WARN */
	txq->tfds = NULL;
error:
	/* kfree(NULL) is fine for slots that were never allocated */
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
ERR_PTR(-EINVAL); 11000cd1ad2dSMordechay Goodstein 11010cd1ad2dSMordechay Goodstein txq = kzalloc(sizeof(*txq), GFP_KERNEL); 11020cd1ad2dSMordechay Goodstein if (!txq) 1103*ba3d4acdSJohannes Berg return ERR_PTR(-ENOMEM); 11040cd1ad2dSMordechay Goodstein 11050cd1ad2dSMordechay Goodstein txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, 11060cd1ad2dSMordechay Goodstein &txq->bc_tbl.dma); 11070cd1ad2dSMordechay Goodstein if (!txq->bc_tbl.addr) { 11080cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 11090cd1ad2dSMordechay Goodstein kfree(txq); 1110*ba3d4acdSJohannes Berg return ERR_PTR(-ENOMEM); 11110cd1ad2dSMordechay Goodstein } 11120cd1ad2dSMordechay Goodstein 11130cd1ad2dSMordechay Goodstein ret = iwl_txq_alloc(trans, txq, size, false); 11140cd1ad2dSMordechay Goodstein if (ret) { 11150cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx queue alloc failed\n"); 11160cd1ad2dSMordechay Goodstein goto error; 11170cd1ad2dSMordechay Goodstein } 11180cd1ad2dSMordechay Goodstein ret = iwl_txq_init(trans, txq, size, false); 11190cd1ad2dSMordechay Goodstein if (ret) { 11200cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx queue init failed\n"); 11210cd1ad2dSMordechay Goodstein goto error; 11220cd1ad2dSMordechay Goodstein } 11230cd1ad2dSMordechay Goodstein 11240cd1ad2dSMordechay Goodstein txq->wd_timeout = msecs_to_jiffies(timeout); 11250cd1ad2dSMordechay Goodstein 1126*ba3d4acdSJohannes Berg return txq; 11270cd1ad2dSMordechay Goodstein 11280cd1ad2dSMordechay Goodstein error: 11290cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 1130*ba3d4acdSJohannes Berg return ERR_PTR(ret); 11310cd1ad2dSMordechay Goodstein } 11320cd1ad2dSMordechay Goodstein 11330cd1ad2dSMordechay Goodstein static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, 11340cd1ad2dSMordechay Goodstein struct iwl_host_cmd *hcmd) 11350cd1ad2dSMordechay Goodstein { 11360cd1ad2dSMordechay Goodstein struct iwl_tx_queue_cfg_rsp *rsp; 
11370cd1ad2dSMordechay Goodstein int ret, qid; 11380cd1ad2dSMordechay Goodstein u32 wr_ptr; 11390cd1ad2dSMordechay Goodstein 11400cd1ad2dSMordechay Goodstein if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != 11410cd1ad2dSMordechay Goodstein sizeof(*rsp))) { 11420cd1ad2dSMordechay Goodstein ret = -EINVAL; 11430cd1ad2dSMordechay Goodstein goto error_free_resp; 11440cd1ad2dSMordechay Goodstein } 11450cd1ad2dSMordechay Goodstein 11460cd1ad2dSMordechay Goodstein rsp = (void *)hcmd->resp_pkt->data; 11470cd1ad2dSMordechay Goodstein qid = le16_to_cpu(rsp->queue_number); 11480cd1ad2dSMordechay Goodstein wr_ptr = le16_to_cpu(rsp->write_pointer); 11490cd1ad2dSMordechay Goodstein 11500cd1ad2dSMordechay Goodstein if (qid >= ARRAY_SIZE(trans->txqs.txq)) { 11510cd1ad2dSMordechay Goodstein WARN_ONCE(1, "queue index %d unsupported", qid); 11520cd1ad2dSMordechay Goodstein ret = -EIO; 11530cd1ad2dSMordechay Goodstein goto error_free_resp; 11540cd1ad2dSMordechay Goodstein } 11550cd1ad2dSMordechay Goodstein 11560cd1ad2dSMordechay Goodstein if (test_and_set_bit(qid, trans->txqs.queue_used)) { 11570cd1ad2dSMordechay Goodstein WARN_ONCE(1, "queue %d already used", qid); 11580cd1ad2dSMordechay Goodstein ret = -EIO; 11590cd1ad2dSMordechay Goodstein goto error_free_resp; 11600cd1ad2dSMordechay Goodstein } 11610cd1ad2dSMordechay Goodstein 11624cf2f590SMordechay Goodstein if (WARN_ONCE(trans->txqs.txq[qid], 11634cf2f590SMordechay Goodstein "queue %d already allocated\n", qid)) { 11644cf2f590SMordechay Goodstein ret = -EIO; 11654cf2f590SMordechay Goodstein goto error_free_resp; 11664cf2f590SMordechay Goodstein } 11674cf2f590SMordechay Goodstein 11680cd1ad2dSMordechay Goodstein txq->id = qid; 11690cd1ad2dSMordechay Goodstein trans->txqs.txq[qid] = txq; 11700cd1ad2dSMordechay Goodstein wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); 11710cd1ad2dSMordechay Goodstein 11720cd1ad2dSMordechay Goodstein /* Place first TFD at index corresponding to start sequence number */ 
11730cd1ad2dSMordechay Goodstein txq->read_ptr = wr_ptr; 11740cd1ad2dSMordechay Goodstein txq->write_ptr = wr_ptr; 11750cd1ad2dSMordechay Goodstein 11760cd1ad2dSMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); 11770cd1ad2dSMordechay Goodstein 11780cd1ad2dSMordechay Goodstein iwl_free_resp(hcmd); 11790cd1ad2dSMordechay Goodstein return qid; 11800cd1ad2dSMordechay Goodstein 11810cd1ad2dSMordechay Goodstein error_free_resp: 11820cd1ad2dSMordechay Goodstein iwl_free_resp(hcmd); 11830cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 11840cd1ad2dSMordechay Goodstein return ret; 11850cd1ad2dSMordechay Goodstein } 11860cd1ad2dSMordechay Goodstein 11870cd1ad2dSMordechay Goodstein int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, 11880cd1ad2dSMordechay Goodstein int cmd_id, int size, unsigned int timeout) 11890cd1ad2dSMordechay Goodstein { 1190*ba3d4acdSJohannes Berg struct iwl_txq *txq; 11910cd1ad2dSMordechay Goodstein struct iwl_tx_queue_cfg_cmd cmd = { 11920cd1ad2dSMordechay Goodstein .flags = flags, 11930cd1ad2dSMordechay Goodstein .sta_id = sta_id, 11940cd1ad2dSMordechay Goodstein .tid = tid, 11950cd1ad2dSMordechay Goodstein }; 11960cd1ad2dSMordechay Goodstein struct iwl_host_cmd hcmd = { 11970cd1ad2dSMordechay Goodstein .id = cmd_id, 11980cd1ad2dSMordechay Goodstein .len = { sizeof(cmd) }, 11990cd1ad2dSMordechay Goodstein .data = { &cmd, }, 12000cd1ad2dSMordechay Goodstein .flags = CMD_WANT_SKB, 12010cd1ad2dSMordechay Goodstein }; 12020cd1ad2dSMordechay Goodstein int ret; 12030cd1ad2dSMordechay Goodstein 1204*ba3d4acdSJohannes Berg txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); 1205*ba3d4acdSJohannes Berg if (IS_ERR(txq)) 1206*ba3d4acdSJohannes Berg return PTR_ERR(txq); 12070cd1ad2dSMordechay Goodstein 12080cd1ad2dSMordechay Goodstein cmd.tfdq_addr = cpu_to_le64(txq->dma_addr); 12090cd1ad2dSMordechay Goodstein cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); 12100cd1ad2dSMordechay Goodstein 
cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); 12110cd1ad2dSMordechay Goodstein 12120cd1ad2dSMordechay Goodstein ret = iwl_trans_send_cmd(trans, &hcmd); 12130cd1ad2dSMordechay Goodstein if (ret) 12140cd1ad2dSMordechay Goodstein goto error; 12150cd1ad2dSMordechay Goodstein 12160cd1ad2dSMordechay Goodstein return iwl_txq_alloc_response(trans, txq, &hcmd); 12170cd1ad2dSMordechay Goodstein 12180cd1ad2dSMordechay Goodstein error: 12190cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 12200cd1ad2dSMordechay Goodstein return ret; 12210cd1ad2dSMordechay Goodstein } 12220cd1ad2dSMordechay Goodstein 12230cd1ad2dSMordechay Goodstein void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) 12240cd1ad2dSMordechay Goodstein { 12250cd1ad2dSMordechay Goodstein if (WARN(queue >= IWL_MAX_TVQM_QUEUES, 12260cd1ad2dSMordechay Goodstein "queue %d out of range", queue)) 12270cd1ad2dSMordechay Goodstein return; 12280cd1ad2dSMordechay Goodstein 12290cd1ad2dSMordechay Goodstein /* 12300cd1ad2dSMordechay Goodstein * Upon HW Rfkill - we stop the device, and then stop the queues 12310cd1ad2dSMordechay Goodstein * in the op_mode. Just for the sake of the simplicity of the op_mode, 12320cd1ad2dSMordechay Goodstein * allow the op_mode to call txq_disable after it already called 12330cd1ad2dSMordechay Goodstein * stop_device. 
12340cd1ad2dSMordechay Goodstein */ 12350cd1ad2dSMordechay Goodstein if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { 12360cd1ad2dSMordechay Goodstein WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 12370cd1ad2dSMordechay Goodstein "queue %d not used", queue); 12380cd1ad2dSMordechay Goodstein return; 12390cd1ad2dSMordechay Goodstein } 12400cd1ad2dSMordechay Goodstein 12412f8cfcc4SMordechay Goodstein iwl_txq_gen2_free(trans, queue); 12420cd1ad2dSMordechay Goodstein 12430cd1ad2dSMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); 12440cd1ad2dSMordechay Goodstein } 12450cd1ad2dSMordechay Goodstein 12460cd1ad2dSMordechay Goodstein void iwl_txq_gen2_tx_free(struct iwl_trans *trans) 12470cd1ad2dSMordechay Goodstein { 12480cd1ad2dSMordechay Goodstein int i; 12490cd1ad2dSMordechay Goodstein 12500cd1ad2dSMordechay Goodstein memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 12510cd1ad2dSMordechay Goodstein 12520cd1ad2dSMordechay Goodstein /* Free all TX queues */ 12530cd1ad2dSMordechay Goodstein for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { 12540cd1ad2dSMordechay Goodstein if (!trans->txqs.txq[i]) 12550cd1ad2dSMordechay Goodstein continue; 12560cd1ad2dSMordechay Goodstein 12570cd1ad2dSMordechay Goodstein iwl_txq_gen2_free(trans, i); 12580cd1ad2dSMordechay Goodstein } 12590cd1ad2dSMordechay Goodstein } 12600cd1ad2dSMordechay Goodstein 12610cd1ad2dSMordechay Goodstein int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) 12620cd1ad2dSMordechay Goodstein { 12630cd1ad2dSMordechay Goodstein struct iwl_txq *queue; 12640cd1ad2dSMordechay Goodstein int ret; 12650cd1ad2dSMordechay Goodstein 12660cd1ad2dSMordechay Goodstein /* alloc and init the tx queue */ 12670cd1ad2dSMordechay Goodstein if (!trans->txqs.txq[txq_id]) { 12680cd1ad2dSMordechay Goodstein queue = kzalloc(sizeof(*queue), GFP_KERNEL); 12690cd1ad2dSMordechay Goodstein if (!queue) { 12700cd1ad2dSMordechay Goodstein IWL_ERR(trans, 
"Not enough memory for tx queue\n"); 12710cd1ad2dSMordechay Goodstein return -ENOMEM; 12720cd1ad2dSMordechay Goodstein } 12730cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id] = queue; 12740cd1ad2dSMordechay Goodstein ret = iwl_txq_alloc(trans, queue, queue_size, true); 12750cd1ad2dSMordechay Goodstein if (ret) { 12760cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 12770cd1ad2dSMordechay Goodstein goto error; 12780cd1ad2dSMordechay Goodstein } 12790cd1ad2dSMordechay Goodstein } else { 12800cd1ad2dSMordechay Goodstein queue = trans->txqs.txq[txq_id]; 12810cd1ad2dSMordechay Goodstein } 12820cd1ad2dSMordechay Goodstein 12830cd1ad2dSMordechay Goodstein ret = iwl_txq_init(trans, queue, queue_size, 12840cd1ad2dSMordechay Goodstein (txq_id == trans->txqs.cmd.q_id)); 12850cd1ad2dSMordechay Goodstein if (ret) { 12860cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 12870cd1ad2dSMordechay Goodstein goto error; 12880cd1ad2dSMordechay Goodstein } 12890cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id]->id = txq_id; 12900cd1ad2dSMordechay Goodstein set_bit(txq_id, trans->txqs.queue_used); 12910cd1ad2dSMordechay Goodstein 12920cd1ad2dSMordechay Goodstein return 0; 12930cd1ad2dSMordechay Goodstein 12940cd1ad2dSMordechay Goodstein error: 12950cd1ad2dSMordechay Goodstein iwl_txq_gen2_tx_free(trans); 12960cd1ad2dSMordechay Goodstein return ret; 12970cd1ad2dSMordechay Goodstein } 12980cd1ad2dSMordechay Goodstein 12990179bfffSMordechay Goodstein static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, 13000179bfffSMordechay Goodstein void *_tfd, u8 idx) 13010179bfffSMordechay Goodstein { 13020179bfffSMordechay Goodstein struct iwl_tfd *tfd; 13030179bfffSMordechay Goodstein struct iwl_tfd_tb *tb; 13040179bfffSMordechay Goodstein dma_addr_t addr; 13050179bfffSMordechay Goodstein dma_addr_t hi_len; 13060179bfffSMordechay Goodstein 13070179bfffSMordechay Goodstein if (trans->trans_cfg->use_tfh) { 
1308d4530f63SJohannes Berg struct iwl_tfh_tfd *tfh_tfd = _tfd; 1309d4530f63SJohannes Berg struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; 13100179bfffSMordechay Goodstein 1311d4530f63SJohannes Berg return (dma_addr_t)(le64_to_cpu(tfh_tb->addr)); 13120179bfffSMordechay Goodstein } 13130179bfffSMordechay Goodstein 13140179bfffSMordechay Goodstein tfd = _tfd; 13150179bfffSMordechay Goodstein tb = &tfd->tbs[idx]; 13160179bfffSMordechay Goodstein addr = get_unaligned_le32(&tb->lo); 13170179bfffSMordechay Goodstein 13180179bfffSMordechay Goodstein if (sizeof(dma_addr_t) <= sizeof(u32)) 13190179bfffSMordechay Goodstein return addr; 13200179bfffSMordechay Goodstein 13210179bfffSMordechay Goodstein hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 13220179bfffSMordechay Goodstein 13230179bfffSMordechay Goodstein /* 13240179bfffSMordechay Goodstein * shift by 16 twice to avoid warnings on 32-bit 13250179bfffSMordechay Goodstein * (where this code never runs anyway due to the 13260179bfffSMordechay Goodstein * if statement above) 13270179bfffSMordechay Goodstein */ 13280179bfffSMordechay Goodstein return addr | ((hi_len << 16) << 16); 13290179bfffSMordechay Goodstein } 13300179bfffSMordechay Goodstein 13310179bfffSMordechay Goodstein void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, 13320179bfffSMordechay Goodstein struct iwl_cmd_meta *meta, 13330179bfffSMordechay Goodstein struct iwl_txq *txq, int index) 13340179bfffSMordechay Goodstein { 13350179bfffSMordechay Goodstein int i, num_tbs; 13360179bfffSMordechay Goodstein void *tfd = iwl_txq_get_tfd(trans, txq, index); 13370179bfffSMordechay Goodstein 13380179bfffSMordechay Goodstein /* Sanity check on number of chunks */ 13390179bfffSMordechay Goodstein num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); 13400179bfffSMordechay Goodstein 13410179bfffSMordechay Goodstein if (num_tbs > trans->txqs.tfd.max_tbs) { 13420179bfffSMordechay Goodstein IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 13430179bfffSMordechay Goodstein /* @todo 
		   issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		/* meta->tbs bit i records whether TB i was page-mapped */
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	/* mark the TFD as empty in whichever descriptor format is in use */
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	/* the scheduler counts the frame plus CRC and delimiter */
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	/* hardware appends the MIC/ICV/IV, so account for it in the count */
	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	/* some devices expect the count in dwords rather than bytes */
	if (trans->txqs.bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	/* entry layout: bits 0..11 = length, bits 12..15 = station id */
	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	/* the first TFD_QUEUE_SIZE_BC_DUP entries are duplicated at the end */
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
			bc_ent;
}

/*
 * iwl_txq_gen1_inval_byte_cnt_tbl - invalidate the byte-count entry at the
 * queue's read pointer (length 1 marks the slot as consumed).
 */
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	/* the command queue carries no station id */
	if (txq_id != trans->txqs.cmd.q_id)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	/* keep the duplicated window in sync, same as on update */
	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
			bc_ent;
}

/*
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	/* nothing to do on a queue that was never (or no longer) populated */
	if (!txq->entries)
		return;

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

/*
 * iwl_txq_progress - rearm or cancel the stuck-queue watchdog after the
 * queue made progress. Caller must hold txq->lock.
 */
void iwl_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	/* watchdog disabled for this queue */
	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
	int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	/* nothing to reclaim */
	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

	if (!iwl_txq_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->trans_cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);

		/* let the FW-debug machinery record this anomaly */
		iwl_op_mode_time_point(trans->op_mode,
				       IWL_FW_INI_TIME_POINT_FAKE_TX,
				       NULL);
		goto out;
	}

	/* caller must hand us an empty list to fill */
	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	/* walk read_ptr forward to tfd_num, freeing each TFD on the way */
	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_txq_free_tso_page(trans, skb);

		/* hand the skb back to the caller for TX-status processing */
		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		/* gen1 devices also track lengths in the byte-count table */
		if (!trans->trans_cfg->use_tfh)
			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq);
	}

	iwl_txq_progress(txq);

	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in reclaim path which is non
		 * re-entrant, so noone will try to take the access the
		 * txq data from that path. We stopped tx, so we can't
		 * have tx as well. Bottom line, we can unlock and re-lock
		 * later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			/* the prepared TX command was stashed in skb->cb */
			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->txqs.dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		/* re-take the lock dropped before draining the overflow q */
		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

/* Set wr_ptr of specific device and txq */
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	/* empty the queue: read pointer catches up with the write pointer */
	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_trans_txq_freeze_timer - freeze/unfreeze the stuck-queue watchdog
 * @txqs: bitmap of queue ids to act on
 * @freeze: true to freeze the timers, false to resume them
 *
 * On freeze, the remaining time until expiry is saved so that on wake the
 * timer resumes with the same remainder rather than a full period.
 */
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze)
{
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = trans->txqs.txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		/* already in the requested state */
		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		/* empty queue: no timer is armed either way */
		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

/*
 * iwl_trans_txq_send_hcmd_sync - send a host command and wait for completion
 *
 * Serializes synchronous commands via STATUS_SYNC_HCMD_ACTIVE and waits up
 * to HOST_COMPLETE_TIMEOUT for the firmware response.
 */
static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	/* only one synchronous command may be in flight at a time */
	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
Goodstein "Command %s: a command is already active!\n", cmd_str)) 172013f028b4SMordechay Goodstein return -EIO; 172113f028b4SMordechay Goodstein 172213f028b4SMordechay Goodstein IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); 172313f028b4SMordechay Goodstein 172413f028b4SMordechay Goodstein cmd_idx = trans->ops->send_cmd(trans, cmd); 172513f028b4SMordechay Goodstein if (cmd_idx < 0) { 172613f028b4SMordechay Goodstein ret = cmd_idx; 172713f028b4SMordechay Goodstein clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 172813f028b4SMordechay Goodstein IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", 172913f028b4SMordechay Goodstein cmd_str, ret); 173013f028b4SMordechay Goodstein return ret; 173113f028b4SMordechay Goodstein } 173213f028b4SMordechay Goodstein 173313f028b4SMordechay Goodstein ret = wait_event_timeout(trans->wait_command_queue, 173413f028b4SMordechay Goodstein !test_bit(STATUS_SYNC_HCMD_ACTIVE, 173513f028b4SMordechay Goodstein &trans->status), 173613f028b4SMordechay Goodstein HOST_COMPLETE_TIMEOUT); 173713f028b4SMordechay Goodstein if (!ret) { 173813f028b4SMordechay Goodstein IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 173913f028b4SMordechay Goodstein cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 174013f028b4SMordechay Goodstein 174113f028b4SMordechay Goodstein IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 174213f028b4SMordechay Goodstein txq->read_ptr, txq->write_ptr); 174313f028b4SMordechay Goodstein 174413f028b4SMordechay Goodstein clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 174513f028b4SMordechay Goodstein IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 174613f028b4SMordechay Goodstein cmd_str); 174713f028b4SMordechay Goodstein ret = -ETIMEDOUT; 174813f028b4SMordechay Goodstein 174913f028b4SMordechay Goodstein iwl_trans_sync_nmi(trans); 175013f028b4SMordechay Goodstein goto cancel; 175113f028b4SMordechay Goodstein } 175213f028b4SMordechay Goodstein 
175313f028b4SMordechay Goodstein if (test_bit(STATUS_FW_ERROR, &trans->status)) { 17544b992db6SJohannes Berg if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, 17554b992db6SJohannes Berg &trans->status)) { 175613f028b4SMordechay Goodstein IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); 175713f028b4SMordechay Goodstein dump_stack(); 17584b992db6SJohannes Berg } 175913f028b4SMordechay Goodstein ret = -EIO; 176013f028b4SMordechay Goodstein goto cancel; 176113f028b4SMordechay Goodstein } 176213f028b4SMordechay Goodstein 176313f028b4SMordechay Goodstein if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 176413f028b4SMordechay Goodstein test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 176513f028b4SMordechay Goodstein IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 176613f028b4SMordechay Goodstein ret = -ERFKILL; 176713f028b4SMordechay Goodstein goto cancel; 176813f028b4SMordechay Goodstein } 176913f028b4SMordechay Goodstein 177013f028b4SMordechay Goodstein if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 177113f028b4SMordechay Goodstein IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); 177213f028b4SMordechay Goodstein ret = -EIO; 177313f028b4SMordechay Goodstein goto cancel; 177413f028b4SMordechay Goodstein } 177513f028b4SMordechay Goodstein 177613f028b4SMordechay Goodstein return 0; 177713f028b4SMordechay Goodstein 177813f028b4SMordechay Goodstein cancel: 177913f028b4SMordechay Goodstein if (cmd->flags & CMD_WANT_SKB) { 178013f028b4SMordechay Goodstein /* 178113f028b4SMordechay Goodstein * Cancel the CMD_WANT_SKB flag for the cmd in the 178213f028b4SMordechay Goodstein * TX cmd queue. Otherwise in case the cmd comes 178313f028b4SMordechay Goodstein * in later, it will possibly set an invalid 178413f028b4SMordechay Goodstein * address (cmd->meta.source). 
178513f028b4SMordechay Goodstein */ 178613f028b4SMordechay Goodstein txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 178713f028b4SMordechay Goodstein } 178813f028b4SMordechay Goodstein 178913f028b4SMordechay Goodstein if (cmd->resp_pkt) { 179013f028b4SMordechay Goodstein iwl_free_resp(cmd); 179113f028b4SMordechay Goodstein cmd->resp_pkt = NULL; 179213f028b4SMordechay Goodstein } 179313f028b4SMordechay Goodstein 179413f028b4SMordechay Goodstein return ret; 179513f028b4SMordechay Goodstein } 179613f028b4SMordechay Goodstein 179713f028b4SMordechay Goodstein int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, 179813f028b4SMordechay Goodstein struct iwl_host_cmd *cmd) 179913f028b4SMordechay Goodstein { 180013f028b4SMordechay Goodstein /* Make sure the NIC is still alive in the bus */ 180113f028b4SMordechay Goodstein if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 180213f028b4SMordechay Goodstein return -ENODEV; 180313f028b4SMordechay Goodstein 180413f028b4SMordechay Goodstein if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 180513f028b4SMordechay Goodstein test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 180613f028b4SMordechay Goodstein IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 180713f028b4SMordechay Goodstein cmd->id); 180813f028b4SMordechay Goodstein return -ERFKILL; 180913f028b4SMordechay Goodstein } 181013f028b4SMordechay Goodstein 181113f028b4SMordechay Goodstein if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && 181213f028b4SMordechay Goodstein !(cmd->flags & CMD_SEND_IN_D3))) { 181313f028b4SMordechay Goodstein IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); 181413f028b4SMordechay Goodstein return -EHOSTDOWN; 181513f028b4SMordechay Goodstein } 181613f028b4SMordechay Goodstein 181713f028b4SMordechay Goodstein if (cmd->flags & CMD_ASYNC) { 181813f028b4SMordechay Goodstein int ret; 181913f028b4SMordechay Goodstein 182013f028b4SMordechay Goodstein /* An asynchronous command can not expect an SKB to be set. 
*/ 182113f028b4SMordechay Goodstein if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 182213f028b4SMordechay Goodstein return -EINVAL; 182313f028b4SMordechay Goodstein 182413f028b4SMordechay Goodstein ret = trans->ops->send_cmd(trans, cmd); 182513f028b4SMordechay Goodstein if (ret < 0) { 182613f028b4SMordechay Goodstein IWL_ERR(trans, 182713f028b4SMordechay Goodstein "Error sending %s: enqueue_hcmd failed: %d\n", 182813f028b4SMordechay Goodstein iwl_get_cmd_string(trans, cmd->id), ret); 182913f028b4SMordechay Goodstein return ret; 183013f028b4SMordechay Goodstein } 183113f028b4SMordechay Goodstein return 0; 183213f028b4SMordechay Goodstein } 183313f028b4SMordechay Goodstein 183413f028b4SMordechay Goodstein return iwl_trans_txq_send_hcmd_sync(trans, cmd); 183513f028b4SMordechay Goodstein } 183613f028b4SMordechay Goodstein 1837