// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020-2021 Intel Corporation
 */
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/tx.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>

/*
 * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
{
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
		/* skip slots for queues that were never allocated */
		if (!trans->txqs.txq[txq_id])
			continue;
		iwl_txq_gen2_unmap(trans, txq_id);
	}
}

/*
 * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
 *
 * Writes the byte count (and the number of 64-byte TFD chunks the HW
 * must fetch) for the frame at the current write pointer into the
 * queue's byte-count table.  The encoding differs by device family:
 * AX210+ stores raw bytes (14-bit), older devices store dwords (12-bit).
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void
iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

/* Number of valid TBs in a TFD: only the low 5 bits of num_tbs are used */
static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

/*
 * iwl_txq_gen2_tfd_unmap - unmap all DMA mappings recorded in a TFD
 *
 * @meta->tbs is a bitmap: bit i set means TB i was mapped as a page
 * (dma_unmap_page), otherwise it was a single mapping.  Resets
 * tfd->num_tbs so the TFD reads as empty afterwards.
 */
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

/*
 * iwl_txq_gen2_free_tfd - unmap and free the TFD at the queue read pointer
 */
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	/* nothing to do if the queue was never (or no longer is) allocated */
	if (!txq->entries)
		return;

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if
(skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

/*
 * iwl_txq_gen2_set_tb - append one transfer buffer (TB) to a TFD
 *
 * Returns the index of the newly added TB on success, or -EINVAL when
 * the TFD already holds the maximum number of TBs.
 */
int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/*
	 * Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	/* TB address may be unaligned in the TFD, hence put_unaligned */
	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

/*
 * Allocate a page used to hold a copy of data for the 4 GiB-crossing
 * HW workaround; pages are chained through skb->cb so they can all be
 * freed when the skb completes.
 */
static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer
to the previous page if there */ 2160cd1ad2dSMordechay Goodstein *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; 2170cd1ad2dSMordechay Goodstein *page_ptr = ret; 2180cd1ad2dSMordechay Goodstein 2190cd1ad2dSMordechay Goodstein return ret; 2200cd1ad2dSMordechay Goodstein } 2210cd1ad2dSMordechay Goodstein 2220cd1ad2dSMordechay Goodstein /* 2230cd1ad2dSMordechay Goodstein * Add a TB and if needed apply the FH HW bug workaround; 2240cd1ad2dSMordechay Goodstein * meta != NULL indicates that it's a page mapping and we 2250cd1ad2dSMordechay Goodstein * need to dma_unmap_page() and set the meta->tbs bit in 2260cd1ad2dSMordechay Goodstein * this case. 2270cd1ad2dSMordechay Goodstein */ 2280cd1ad2dSMordechay Goodstein static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, 2290cd1ad2dSMordechay Goodstein struct sk_buff *skb, 2300cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd, 2310cd1ad2dSMordechay Goodstein dma_addr_t phys, void *virt, 2320cd1ad2dSMordechay Goodstein u16 len, struct iwl_cmd_meta *meta) 2330cd1ad2dSMordechay Goodstein { 2340cd1ad2dSMordechay Goodstein dma_addr_t oldphys = phys; 2350cd1ad2dSMordechay Goodstein struct page *page; 2360cd1ad2dSMordechay Goodstein int ret; 2370cd1ad2dSMordechay Goodstein 2380cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, phys))) 2390cd1ad2dSMordechay Goodstein return -ENOMEM; 2400cd1ad2dSMordechay Goodstein 2410cd1ad2dSMordechay Goodstein if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { 2420cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 2430cd1ad2dSMordechay Goodstein 2440cd1ad2dSMordechay Goodstein if (ret < 0) 2450cd1ad2dSMordechay Goodstein goto unmap; 2460cd1ad2dSMordechay Goodstein 2470cd1ad2dSMordechay Goodstein if (meta) 2480cd1ad2dSMordechay Goodstein meta->tbs |= BIT(ret); 2490cd1ad2dSMordechay Goodstein 2500cd1ad2dSMordechay Goodstein ret = 0; 2510cd1ad2dSMordechay Goodstein goto trace; 2520cd1ad2dSMordechay Goodstein } 
2530cd1ad2dSMordechay Goodstein 2540cd1ad2dSMordechay Goodstein /* 2550cd1ad2dSMordechay Goodstein * Work around a hardware bug. If (as expressed in the 2560cd1ad2dSMordechay Goodstein * condition above) the TB ends on a 32-bit boundary, 2570cd1ad2dSMordechay Goodstein * then the next TB may be accessed with the wrong 2580cd1ad2dSMordechay Goodstein * address. 2590cd1ad2dSMordechay Goodstein * To work around it, copy the data elsewhere and make 2600cd1ad2dSMordechay Goodstein * a new mapping for it so the device will not fail. 2610cd1ad2dSMordechay Goodstein */ 2620cd1ad2dSMordechay Goodstein 2630cd1ad2dSMordechay Goodstein if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { 2640cd1ad2dSMordechay Goodstein ret = -ENOBUFS; 2650cd1ad2dSMordechay Goodstein goto unmap; 2660cd1ad2dSMordechay Goodstein } 2670cd1ad2dSMordechay Goodstein 2680cd1ad2dSMordechay Goodstein page = get_workaround_page(trans, skb); 2690cd1ad2dSMordechay Goodstein if (!page) { 2700cd1ad2dSMordechay Goodstein ret = -ENOMEM; 2710cd1ad2dSMordechay Goodstein goto unmap; 2720cd1ad2dSMordechay Goodstein } 2730cd1ad2dSMordechay Goodstein 2740cd1ad2dSMordechay Goodstein memcpy(page_address(page), virt, len); 2750cd1ad2dSMordechay Goodstein 2760cd1ad2dSMordechay Goodstein phys = dma_map_single(trans->dev, page_address(page), len, 2770cd1ad2dSMordechay Goodstein DMA_TO_DEVICE); 2780cd1ad2dSMordechay Goodstein if (unlikely(dma_mapping_error(trans->dev, phys))) 2790cd1ad2dSMordechay Goodstein return -ENOMEM; 2800cd1ad2dSMordechay Goodstein ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 2810cd1ad2dSMordechay Goodstein if (ret < 0) { 2820cd1ad2dSMordechay Goodstein /* unmap the new allocation as single */ 2830cd1ad2dSMordechay Goodstein oldphys = phys; 2840cd1ad2dSMordechay Goodstein meta = NULL; 2850cd1ad2dSMordechay Goodstein goto unmap; 2860cd1ad2dSMordechay Goodstein } 2870cd1ad2dSMordechay Goodstein IWL_WARN(trans, 2880cd1ad2dSMordechay Goodstein "TB bug workaround: copied %d bytes from 0x%llx to 
0x%llx\n", 2890cd1ad2dSMordechay Goodstein len, (unsigned long long)oldphys, (unsigned long long)phys); 2900cd1ad2dSMordechay Goodstein 2910cd1ad2dSMordechay Goodstein ret = 0; 2920cd1ad2dSMordechay Goodstein unmap: 2930cd1ad2dSMordechay Goodstein if (meta) 2940cd1ad2dSMordechay Goodstein dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); 2950cd1ad2dSMordechay Goodstein else 2960cd1ad2dSMordechay Goodstein dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); 2970cd1ad2dSMordechay Goodstein trace: 2980cd1ad2dSMordechay Goodstein trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); 2990cd1ad2dSMordechay Goodstein 3000cd1ad2dSMordechay Goodstein return ret; 3010cd1ad2dSMordechay Goodstein } 3020cd1ad2dSMordechay Goodstein 3030cd1ad2dSMordechay Goodstein #ifdef CONFIG_INET 3040cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, 3050cd1ad2dSMordechay Goodstein struct sk_buff *skb) 3060cd1ad2dSMordechay Goodstein { 3070cd1ad2dSMordechay Goodstein struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); 3080cd1ad2dSMordechay Goodstein struct page **page_ptr; 3090cd1ad2dSMordechay Goodstein 3100cd1ad2dSMordechay Goodstein page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 3110cd1ad2dSMordechay Goodstein 3120cd1ad2dSMordechay Goodstein if (WARN_ON(*page_ptr)) 3130cd1ad2dSMordechay Goodstein return NULL; 3140cd1ad2dSMordechay Goodstein 3150cd1ad2dSMordechay Goodstein if (!p->page) 3160cd1ad2dSMordechay Goodstein goto alloc; 3170cd1ad2dSMordechay Goodstein 3180cd1ad2dSMordechay Goodstein /* 3190cd1ad2dSMordechay Goodstein * Check if there's enough room on this page 3200cd1ad2dSMordechay Goodstein * 3210cd1ad2dSMordechay Goodstein * Note that we put a page chaining pointer *last* in the 3220cd1ad2dSMordechay Goodstein * page - we need it somewhere, and if it's there then we 3230cd1ad2dSMordechay Goodstein * avoid DMA mapping the last bits of the page which may 3240cd1ad2dSMordechay 
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() in tx-gen2.c)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	/* extra ref held by the skb; dropped when the skb is freed */
	get_page(p->page);
	return p;
}
#endif

/*
 * iwl_txq_gen2_build_amsdu - segment a GSO skb into A-MSDU subframes
 *
 * Builds per-subframe 802.3/SNAP/IP/TCP headers in the TSO header page
 * and adds one header TB plus payload TBs per subframe to @tfd.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure (caller unmaps the
 * TFD).  Compiled out (always -EINVAL) without CONFIG_INET.
 */
static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
				    struct sk_buff *skb,
				    struct iwl_tfh_tfd *tfd, int start_len,
				    u8 hdr_len,
				    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	/* 8 = SNAP header length */
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		/* pad previous subframe to a 4-byte boundary */
		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		/* 802.3 subframe header: DA, SA, length */
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */
		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					tb_phys, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			int ret;

			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
							  tb_phys, tso.data,
							  tb_len, NULL);
			if (ret)
				goto out_err;

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re -add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}

/*
 * iwl_txq_gen2_build_tx_amsdu - build a TFD for an A-MSDU frame
 *
 * TB0 is the bidirectional first-TB buffer, TB1 covers the rest of the
 * TX command plus the 802.11 header (not dword-aligned here, since the
 * A-MSDU subframe headers align it), then the A-MSDU subframes are
 * appended.  Returns the TFD or NULL on error (TFD already unmapped).
 */
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

/*
 * Map each page fragment of @skb and add it as a TB (with the
 * 4 GiB-crossing workaround); out_meta->tbs records page mappings.
 */
static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
skb_frag_address(frag), 5550cd1ad2dSMordechay Goodstein fragsz, out_meta); 5560cd1ad2dSMordechay Goodstein if (ret) 5570cd1ad2dSMordechay Goodstein return ret; 5580cd1ad2dSMordechay Goodstein } 5590cd1ad2dSMordechay Goodstein 5600cd1ad2dSMordechay Goodstein return 0; 5610cd1ad2dSMordechay Goodstein } 5620cd1ad2dSMordechay Goodstein 5630cd1ad2dSMordechay Goodstein static struct 5640cd1ad2dSMordechay Goodstein iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, 5650cd1ad2dSMordechay Goodstein struct iwl_txq *txq, 5660cd1ad2dSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd, 5670cd1ad2dSMordechay Goodstein struct sk_buff *skb, 5680cd1ad2dSMordechay Goodstein struct iwl_cmd_meta *out_meta, 5690cd1ad2dSMordechay Goodstein int hdr_len, 5700cd1ad2dSMordechay Goodstein int tx_cmd_len, 5710cd1ad2dSMordechay Goodstein bool pad) 5720cd1ad2dSMordechay Goodstein { 5730cd1ad2dSMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 5740cd1ad2dSMordechay Goodstein struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 5750cd1ad2dSMordechay Goodstein dma_addr_t tb_phys; 5760cd1ad2dSMordechay Goodstein int len, tb1_len, tb2_len; 5770cd1ad2dSMordechay Goodstein void *tb1_addr; 5780cd1ad2dSMordechay Goodstein struct sk_buff *frag; 5790cd1ad2dSMordechay Goodstein 5800cd1ad2dSMordechay Goodstein tb_phys = iwl_txq_get_first_tb_dma(txq, idx); 5810cd1ad2dSMordechay Goodstein 5820cd1ad2dSMordechay Goodstein /* The first TB points to bi-directional DMA data */ 5830cd1ad2dSMordechay Goodstein memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); 5840cd1ad2dSMordechay Goodstein 5850cd1ad2dSMordechay Goodstein /* 5860cd1ad2dSMordechay Goodstein * No need for _with_wa, the first TB allocation is aligned up 5870cd1ad2dSMordechay Goodstein * to a 64-byte boundary and thus can't be at the end or cross 5880cd1ad2dSMordechay Goodstein * a page boundary (much less a 2^32 boundary). 
 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	/* paged fragments of the main skb */
	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	/* frag-list skbs: map each head, then each one's own fragments */
	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

/*
 * iwl_txq_gen2_build_tfd - build the TFD for one frame
 *
 * Zeroes the TFD at the current write pointer and dispatches to the
 * A-MSDU builder (only for GSO-originated A-MSDUs) or to the regular
 * builder.  Non-A-MSDU frames get the dword padding of TB1 (!amsdu).
 */
static
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	/* TX command size differs between pre- and post-AX210 families */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}

/*
 * iwl_txq_space - return the number of TFD slots still available in @q
 */
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

/*
 * iwl_txq_gen2_tx - transmit one frame on a gen2 queue
 *
 * Validates the queue, linearizes the skb if it has more fragments than
 * the hardware supports, builds and enqueues the TFD, updates the
 * byte-count table and pushes the write pointer to the device.
 *
 * Returns 0 on success (including when the frame was parked on the
 * overflow queue because the ring is full), -EINVAL for a bad/unused
 * queue, -ENOMEM if linearization fails, and -1 if TFD building fails.
 */
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	/* too many fragments for the hardware: fall back to a linear skb */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			/* stash the command in skb->cb for later replay */
			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* command length lives at a family-dependent payload layout */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		/* the command queue has no per-skb TSO pages to release */
		if (txq_id != trans->txqs.cmd.q_id) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (!WARN_ON_ONCE(!skb))
				iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	/* drop anything still parked on the overflow queue */
	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_txq_gen2_free_memory - release all memory owned by @txq
 *
 * Frees the TFD ring and first-TB buffers (if allocated), the entries
 * array, the byte-count table and finally the iwl_txq structure itself.
 */
static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}

/*
 * iwl_txq_gen2_free - Deallocate a DMA queue.
 * @txq_id: index of the transmit queue to deallocate.
 *
 * Empties the queue by unmapping and destroying all BDs, frees all
 * buffers (including command buffers for the command queue), stops the
 * stuck-queue timer, frees the queue structure itself via
 * iwl_txq_gen2_free_memory() and marks the queue slot unused.
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			/* commands may hold key material - wipe on free */
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans->txqs.queue_used);
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	/* low-water mark: n_window/4, but at least 4 slots */
	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	/* high-water mark: n_window/8, but at least 2 slots */
	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

/*
 * iwl_txq_init - initialize an already-allocated TX queue
 *
 * Sets up marks and pointers, the queue lock (with a separate lockdep
 * class for the command queue, which nests differently) and the
 * overflow queue.  Returns 0 or a negative error on invalid sizes.
 */
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
	 */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

/*
 * iwl_txq_free_tso_page - free the TSO page chain attached to @skb
 *
 * The page pointer is stashed in skb->cb at page_offs; the pages form a
 * singly-linked list whose next pointer is stored in the last
 * sizeof(void *) bytes of each page.
 */
void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		/* next pointer lives at the very end of the page */
		next = *(void **)(page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

/*
 * iwl_txq_log_scd_error - dump scheduler state for a stuck queue
 *
 * On TFH (gen2) hardware only SW pointers are printed; on older
 * hardware the SCD status, FIFO and HW read/write pointers are read
 * from periphery registers and logged.
 */
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	/* NOTE(review): "0x0%x" in the format below looks like a typo for
	 * "0x%x", but the string is kept byte-identical here. */
	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

/*
 * iwl_txq_stuck_timer - watchdog fired: the queue made no progress
 *
 * Ignores spurious triggers (queue already drained), otherwise logs the
 * scheduler state and forces an NMI to restart the firmware.
 */
static void iwl_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_txq_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_txq_alloc - allocate the DMA buffers and entry array for a queue
 *
 * Allocates the entries array (plus command buffers for the command
 * queue), the TFD ring and the first-TB buffers.  Returns 0 or -EINVAL /
 * -ENOMEM; on failure everything allocated so far is released.
 */
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue)
{
	size_t tfd_sz = trans->txqs.tfd.size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t
	       tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	/* gen2 (TFH) rings are sized by the window, not the max queue size */
	if (trans->trans_cfg->use_tfh)
		tfd_sz = trans->txqs.tfd.size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
	/* unwind in reverse allocation order; kfree(NULL) is a no-op */
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

/*
 * iwl_txq_dyn_alloc_dma - allocate a dynamically-sized gen2 TX queue
 *
 * Allocates the queue structure, its byte-count table from the shared
 * DMA pool, and the ring itself, then initializes it.  On success the
 * queue is returned via @intxq; on failure everything is freed.
 */
static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
				 struct iwl_txq **intxq, int size,
				 unsigned int timeout)
{
	size_t bc_tbl_size, bc_tbl_entries;
	struct iwl_txq *txq;
	int ret;

	WARN_ON(!trans->txqs.bc_tbl_size);

	bc_tbl_size = trans->txqs.bc_tbl_size;
	bc_tbl_entries = bc_tbl_size / sizeof(u16);

	/* the byte-count table must have one entry per queue slot */
	if (WARN_ON(size > bc_tbl_entries))
		return -EINVAL;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;

	txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
					  &txq->bc_tbl.dma);
	if (!txq->bc_tbl.addr) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	*intxq = txq;
	return 0;

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

/*
 * iwl_txq_alloc_response - handle the firmware's queue-config response
 *
 * Parses the queue id and initial write pointer from the response,
 * validates the id, registers the queue and aligns read/write pointers
 * to the firmware-provided start.  Consumes @hcmd's response in all
 * paths; on error the queue memory is freed too.
 *
 * Returns the (non-negative) queue id on success or a negative errno.
 */
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
				  struct iwl_host_cmd *hcmd)
{
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans->txqs.txq[qid] = txq;
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

/*
 * iwl_txq_dyn_alloc - allocate a queue and ask the firmware to enable it
 */
int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size, unsigned int timeout)
{
	struct iwl_txq *txq = NULL;
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = flags,
		.sta_id = sta_id,
		.tid = tid,
	};
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(cmd) },
		.data = { &cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
	if (ret)
		return ret;

	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
Goodstein goto error; 12340cd1ad2dSMordechay Goodstein 12350cd1ad2dSMordechay Goodstein return iwl_txq_alloc_response(trans, txq, &hcmd); 12360cd1ad2dSMordechay Goodstein 12370cd1ad2dSMordechay Goodstein error: 12380cd1ad2dSMordechay Goodstein iwl_txq_gen2_free_memory(trans, txq); 12390cd1ad2dSMordechay Goodstein return ret; 12400cd1ad2dSMordechay Goodstein } 12410cd1ad2dSMordechay Goodstein 12420cd1ad2dSMordechay Goodstein void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) 12430cd1ad2dSMordechay Goodstein { 12440cd1ad2dSMordechay Goodstein if (WARN(queue >= IWL_MAX_TVQM_QUEUES, 12450cd1ad2dSMordechay Goodstein "queue %d out of range", queue)) 12460cd1ad2dSMordechay Goodstein return; 12470cd1ad2dSMordechay Goodstein 12480cd1ad2dSMordechay Goodstein /* 12490cd1ad2dSMordechay Goodstein * Upon HW Rfkill - we stop the device, and then stop the queues 12500cd1ad2dSMordechay Goodstein * in the op_mode. Just for the sake of the simplicity of the op_mode, 12510cd1ad2dSMordechay Goodstein * allow the op_mode to call txq_disable after it already called 12520cd1ad2dSMordechay Goodstein * stop_device. 
12530cd1ad2dSMordechay Goodstein */ 12540cd1ad2dSMordechay Goodstein if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { 12550cd1ad2dSMordechay Goodstein WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 12560cd1ad2dSMordechay Goodstein "queue %d not used", queue); 12570cd1ad2dSMordechay Goodstein return; 12580cd1ad2dSMordechay Goodstein } 12590cd1ad2dSMordechay Goodstein 12602f8cfcc4SMordechay Goodstein iwl_txq_gen2_free(trans, queue); 12610cd1ad2dSMordechay Goodstein 12620cd1ad2dSMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); 12630cd1ad2dSMordechay Goodstein } 12640cd1ad2dSMordechay Goodstein 12650cd1ad2dSMordechay Goodstein void iwl_txq_gen2_tx_free(struct iwl_trans *trans) 12660cd1ad2dSMordechay Goodstein { 12670cd1ad2dSMordechay Goodstein int i; 12680cd1ad2dSMordechay Goodstein 12690cd1ad2dSMordechay Goodstein memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 12700cd1ad2dSMordechay Goodstein 12710cd1ad2dSMordechay Goodstein /* Free all TX queues */ 12720cd1ad2dSMordechay Goodstein for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { 12730cd1ad2dSMordechay Goodstein if (!trans->txqs.txq[i]) 12740cd1ad2dSMordechay Goodstein continue; 12750cd1ad2dSMordechay Goodstein 12760cd1ad2dSMordechay Goodstein iwl_txq_gen2_free(trans, i); 12770cd1ad2dSMordechay Goodstein } 12780cd1ad2dSMordechay Goodstein } 12790cd1ad2dSMordechay Goodstein 12800cd1ad2dSMordechay Goodstein int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) 12810cd1ad2dSMordechay Goodstein { 12820cd1ad2dSMordechay Goodstein struct iwl_txq *queue; 12830cd1ad2dSMordechay Goodstein int ret; 12840cd1ad2dSMordechay Goodstein 12850cd1ad2dSMordechay Goodstein /* alloc and init the tx queue */ 12860cd1ad2dSMordechay Goodstein if (!trans->txqs.txq[txq_id]) { 12870cd1ad2dSMordechay Goodstein queue = kzalloc(sizeof(*queue), GFP_KERNEL); 12880cd1ad2dSMordechay Goodstein if (!queue) { 12890cd1ad2dSMordechay Goodstein IWL_ERR(trans, 
"Not enough memory for tx queue\n"); 12900cd1ad2dSMordechay Goodstein return -ENOMEM; 12910cd1ad2dSMordechay Goodstein } 12920cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id] = queue; 12930cd1ad2dSMordechay Goodstein ret = iwl_txq_alloc(trans, queue, queue_size, true); 12940cd1ad2dSMordechay Goodstein if (ret) { 12950cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 12960cd1ad2dSMordechay Goodstein goto error; 12970cd1ad2dSMordechay Goodstein } 12980cd1ad2dSMordechay Goodstein } else { 12990cd1ad2dSMordechay Goodstein queue = trans->txqs.txq[txq_id]; 13000cd1ad2dSMordechay Goodstein } 13010cd1ad2dSMordechay Goodstein 13020cd1ad2dSMordechay Goodstein ret = iwl_txq_init(trans, queue, queue_size, 13030cd1ad2dSMordechay Goodstein (txq_id == trans->txqs.cmd.q_id)); 13040cd1ad2dSMordechay Goodstein if (ret) { 13050cd1ad2dSMordechay Goodstein IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 13060cd1ad2dSMordechay Goodstein goto error; 13070cd1ad2dSMordechay Goodstein } 13080cd1ad2dSMordechay Goodstein trans->txqs.txq[txq_id]->id = txq_id; 13090cd1ad2dSMordechay Goodstein set_bit(txq_id, trans->txqs.queue_used); 13100cd1ad2dSMordechay Goodstein 13110cd1ad2dSMordechay Goodstein return 0; 13120cd1ad2dSMordechay Goodstein 13130cd1ad2dSMordechay Goodstein error: 13140cd1ad2dSMordechay Goodstein iwl_txq_gen2_tx_free(trans); 13150cd1ad2dSMordechay Goodstein return ret; 13160cd1ad2dSMordechay Goodstein } 13170cd1ad2dSMordechay Goodstein 13180179bfffSMordechay Goodstein static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, 13190179bfffSMordechay Goodstein void *_tfd, u8 idx) 13200179bfffSMordechay Goodstein { 13210179bfffSMordechay Goodstein struct iwl_tfd *tfd; 13220179bfffSMordechay Goodstein struct iwl_tfd_tb *tb; 13230179bfffSMordechay Goodstein dma_addr_t addr; 13240179bfffSMordechay Goodstein dma_addr_t hi_len; 13250179bfffSMordechay Goodstein 13260179bfffSMordechay Goodstein if (trans->trans_cfg->use_tfh) { 
13270179bfffSMordechay Goodstein struct iwl_tfh_tfd *tfd = _tfd; 13280179bfffSMordechay Goodstein struct iwl_tfh_tb *tb = &tfd->tbs[idx]; 13290179bfffSMordechay Goodstein 13300179bfffSMordechay Goodstein return (dma_addr_t)(le64_to_cpu(tb->addr)); 13310179bfffSMordechay Goodstein } 13320179bfffSMordechay Goodstein 13330179bfffSMordechay Goodstein tfd = _tfd; 13340179bfffSMordechay Goodstein tb = &tfd->tbs[idx]; 13350179bfffSMordechay Goodstein addr = get_unaligned_le32(&tb->lo); 13360179bfffSMordechay Goodstein 13370179bfffSMordechay Goodstein if (sizeof(dma_addr_t) <= sizeof(u32)) 13380179bfffSMordechay Goodstein return addr; 13390179bfffSMordechay Goodstein 13400179bfffSMordechay Goodstein hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 13410179bfffSMordechay Goodstein 13420179bfffSMordechay Goodstein /* 13430179bfffSMordechay Goodstein * shift by 16 twice to avoid warnings on 32-bit 13440179bfffSMordechay Goodstein * (where this code never runs anyway due to the 13450179bfffSMordechay Goodstein * if statement above) 13460179bfffSMordechay Goodstein */ 13470179bfffSMordechay Goodstein return addr | ((hi_len << 16) << 16); 13480179bfffSMordechay Goodstein } 13490179bfffSMordechay Goodstein 13500179bfffSMordechay Goodstein void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, 13510179bfffSMordechay Goodstein struct iwl_cmd_meta *meta, 13520179bfffSMordechay Goodstein struct iwl_txq *txq, int index) 13530179bfffSMordechay Goodstein { 13540179bfffSMordechay Goodstein int i, num_tbs; 13550179bfffSMordechay Goodstein void *tfd = iwl_txq_get_tfd(trans, txq, index); 13560179bfffSMordechay Goodstein 13570179bfffSMordechay Goodstein /* Sanity check on number of chunks */ 13580179bfffSMordechay Goodstein num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); 13590179bfffSMordechay Goodstein 13600179bfffSMordechay Goodstein if (num_tbs > trans->txqs.tfd.max_tbs) { 13610179bfffSMordechay Goodstein IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 13620179bfffSMordechay Goodstein /* 
@todo issue fatal error, it is quite serious situation */ 13630179bfffSMordechay Goodstein return; 13640179bfffSMordechay Goodstein } 13650179bfffSMordechay Goodstein 13660179bfffSMordechay Goodstein /* first TB is never freed - it's the bidirectional DMA data */ 13670179bfffSMordechay Goodstein 13680179bfffSMordechay Goodstein for (i = 1; i < num_tbs; i++) { 13690179bfffSMordechay Goodstein if (meta->tbs & BIT(i)) 13700179bfffSMordechay Goodstein dma_unmap_page(trans->dev, 13710179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_addr(trans, 13720179bfffSMordechay Goodstein tfd, i), 13730179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_len(trans, 13740179bfffSMordechay Goodstein tfd, i), 13750179bfffSMordechay Goodstein DMA_TO_DEVICE); 13760179bfffSMordechay Goodstein else 13770179bfffSMordechay Goodstein dma_unmap_single(trans->dev, 13780179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_addr(trans, 13790179bfffSMordechay Goodstein tfd, i), 13800179bfffSMordechay Goodstein iwl_txq_gen1_tfd_tb_get_len(trans, 13810179bfffSMordechay Goodstein tfd, i), 13820179bfffSMordechay Goodstein DMA_TO_DEVICE); 13830179bfffSMordechay Goodstein } 13840179bfffSMordechay Goodstein 13850179bfffSMordechay Goodstein meta->tbs = 0; 13860179bfffSMordechay Goodstein 13870179bfffSMordechay Goodstein if (trans->trans_cfg->use_tfh) { 13880179bfffSMordechay Goodstein struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 13890179bfffSMordechay Goodstein 13900179bfffSMordechay Goodstein tfd_fh->num_tbs = 0; 13910179bfffSMordechay Goodstein } else { 13920179bfffSMordechay Goodstein struct iwl_tfd *tfd_fh = (void *)tfd; 13930179bfffSMordechay Goodstein 13940179bfffSMordechay Goodstein tfd_fh->num_tbs = 0; 13950179bfffSMordechay Goodstein } 13960179bfffSMordechay Goodstein } 13970179bfffSMordechay Goodstein 13980179bfffSMordechay Goodstein #define IWL_TX_CRC_SIZE 4 13990179bfffSMordechay Goodstein #define IWL_TX_DELIMITER_SIZE 4 14000179bfffSMordechay Goodstein 14010179bfffSMordechay Goodstein /* 
14020179bfffSMordechay Goodstein * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array 14030179bfffSMordechay Goodstein */ 14040179bfffSMordechay Goodstein void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, 14050179bfffSMordechay Goodstein struct iwl_txq *txq, u16 byte_cnt, 14060179bfffSMordechay Goodstein int num_tbs) 14070179bfffSMordechay Goodstein { 14080179bfffSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl; 14090179bfffSMordechay Goodstein int write_ptr = txq->write_ptr; 14100179bfffSMordechay Goodstein int txq_id = txq->id; 14110179bfffSMordechay Goodstein u8 sec_ctl = 0; 14120179bfffSMordechay Goodstein u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 14130179bfffSMordechay Goodstein __le16 bc_ent; 14140179bfffSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; 14150179bfffSMordechay Goodstein struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 14160179bfffSMordechay Goodstein u8 sta_id = tx_cmd->sta_id; 14170179bfffSMordechay Goodstein 14180179bfffSMordechay Goodstein scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 14190179bfffSMordechay Goodstein 14200179bfffSMordechay Goodstein sec_ctl = tx_cmd->sec_ctl; 14210179bfffSMordechay Goodstein 14220179bfffSMordechay Goodstein switch (sec_ctl & TX_CMD_SEC_MSK) { 14230179bfffSMordechay Goodstein case TX_CMD_SEC_CCM: 14240179bfffSMordechay Goodstein len += IEEE80211_CCMP_MIC_LEN; 14250179bfffSMordechay Goodstein break; 14260179bfffSMordechay Goodstein case TX_CMD_SEC_TKIP: 14270179bfffSMordechay Goodstein len += IEEE80211_TKIP_ICV_LEN; 14280179bfffSMordechay Goodstein break; 14290179bfffSMordechay Goodstein case TX_CMD_SEC_WEP: 14300179bfffSMordechay Goodstein len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; 14310179bfffSMordechay Goodstein break; 14320179bfffSMordechay Goodstein } 14330179bfffSMordechay Goodstein if (trans->txqs.bc_table_dword) 14340179bfffSMordechay Goodstein len = DIV_ROUND_UP(len, 4); 
14350179bfffSMordechay Goodstein 14360179bfffSMordechay Goodstein if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) 14370179bfffSMordechay Goodstein return; 14380179bfffSMordechay Goodstein 14390179bfffSMordechay Goodstein bc_ent = cpu_to_le16(len | (sta_id << 12)); 14400179bfffSMordechay Goodstein 14410179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 14420179bfffSMordechay Goodstein 14430179bfffSMordechay Goodstein if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 14440179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = 14450179bfffSMordechay Goodstein bc_ent; 14460179bfffSMordechay Goodstein } 14470179bfffSMordechay Goodstein 14480179bfffSMordechay Goodstein void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, 14490179bfffSMordechay Goodstein struct iwl_txq *txq) 14500179bfffSMordechay Goodstein { 14510179bfffSMordechay Goodstein struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 14520179bfffSMordechay Goodstein int txq_id = txq->id; 14530179bfffSMordechay Goodstein int read_ptr = txq->read_ptr; 14540179bfffSMordechay Goodstein u8 sta_id = 0; 14550179bfffSMordechay Goodstein __le16 bc_ent; 14560179bfffSMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; 14570179bfffSMordechay Goodstein struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 14580179bfffSMordechay Goodstein 14590179bfffSMordechay Goodstein WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 14600179bfffSMordechay Goodstein 14610179bfffSMordechay Goodstein if (txq_id != trans->txqs.cmd.q_id) 14620179bfffSMordechay Goodstein sta_id = tx_cmd->sta_id; 14630179bfffSMordechay Goodstein 14640179bfffSMordechay Goodstein bc_ent = cpu_to_le16(1 | (sta_id << 12)); 14650179bfffSMordechay Goodstein 14660179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 14670179bfffSMordechay Goodstein 14680179bfffSMordechay Goodstein if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 
14690179bfffSMordechay Goodstein scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = 14700179bfffSMordechay Goodstein bc_ent; 14710179bfffSMordechay Goodstein } 1472a4450980SMordechay Goodstein 1473a4450980SMordechay Goodstein /* 1474a4450980SMordechay Goodstein * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 1475a4450980SMordechay Goodstein * @trans - transport private data 1476a4450980SMordechay Goodstein * @txq - tx queue 1477a4450980SMordechay Goodstein * @dma_dir - the direction of the DMA mapping 1478a4450980SMordechay Goodstein * 1479a4450980SMordechay Goodstein * Does NOT advance any TFD circular buffer read/write indexes 1480a4450980SMordechay Goodstein * Does NOT free the TFD itself (which is within circular buffer) 1481a4450980SMordechay Goodstein */ 1482a4450980SMordechay Goodstein void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 1483a4450980SMordechay Goodstein { 1484a4450980SMordechay Goodstein /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 1485a4450980SMordechay Goodstein * idx is bounded by n_window 1486a4450980SMordechay Goodstein */ 1487a4450980SMordechay Goodstein int rd_ptr = txq->read_ptr; 1488a4450980SMordechay Goodstein int idx = iwl_txq_get_cmd_index(txq, rd_ptr); 14890f8d5656SEmmanuel Grumbach struct sk_buff *skb; 1490a4450980SMordechay Goodstein 1491a4450980SMordechay Goodstein lockdep_assert_held(&txq->lock); 1492a4450980SMordechay Goodstein 14930f8d5656SEmmanuel Grumbach if (!txq->entries) 14940f8d5656SEmmanuel Grumbach return; 14950f8d5656SEmmanuel Grumbach 1496a4450980SMordechay Goodstein /* We have only q->n_window txq->entries, but we use 1497a4450980SMordechay Goodstein * TFD_QUEUE_SIZE_MAX tfds 1498a4450980SMordechay Goodstein */ 1499a4450980SMordechay Goodstein iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); 1500a4450980SMordechay Goodstein 1501a4450980SMordechay Goodstein /* free SKB */ 1502a4450980SMordechay Goodstein skb = txq->entries[idx].skb; 
1503a4450980SMordechay Goodstein 1504a4450980SMordechay Goodstein /* Can be called from irqs-disabled context 1505a4450980SMordechay Goodstein * If skb is not NULL, it means that the whole queue is being 1506a4450980SMordechay Goodstein * freed and that the queue is not empty - free the skb 1507a4450980SMordechay Goodstein */ 1508a4450980SMordechay Goodstein if (skb) { 1509a4450980SMordechay Goodstein iwl_op_mode_free_skb(trans->op_mode, skb); 1510a4450980SMordechay Goodstein txq->entries[idx].skb = NULL; 1511a4450980SMordechay Goodstein } 1512a4450980SMordechay Goodstein } 1513a4450980SMordechay Goodstein 1514a4450980SMordechay Goodstein void iwl_txq_progress(struct iwl_txq *txq) 1515a4450980SMordechay Goodstein { 1516a4450980SMordechay Goodstein lockdep_assert_held(&txq->lock); 1517a4450980SMordechay Goodstein 1518a4450980SMordechay Goodstein if (!txq->wd_timeout) 1519a4450980SMordechay Goodstein return; 1520a4450980SMordechay Goodstein 1521a4450980SMordechay Goodstein /* 1522a4450980SMordechay Goodstein * station is asleep and we send data - that must 1523a4450980SMordechay Goodstein * be uAPSD or PS-Poll. Don't rearm the timer. 
1524a4450980SMordechay Goodstein */ 1525a4450980SMordechay Goodstein if (txq->frozen) 1526a4450980SMordechay Goodstein return; 1527a4450980SMordechay Goodstein 1528a4450980SMordechay Goodstein /* 1529a4450980SMordechay Goodstein * if empty delete timer, otherwise move timer forward 1530a4450980SMordechay Goodstein * since we're making progress on this queue 1531a4450980SMordechay Goodstein */ 1532a4450980SMordechay Goodstein if (txq->read_ptr == txq->write_ptr) 1533a4450980SMordechay Goodstein del_timer(&txq->stuck_timer); 1534a4450980SMordechay Goodstein else 1535a4450980SMordechay Goodstein mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1536a4450980SMordechay Goodstein } 1537a4450980SMordechay Goodstein 1538a4450980SMordechay Goodstein /* Frees buffers until index _not_ inclusive */ 1539a4450980SMordechay Goodstein void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 1540a4450980SMordechay Goodstein struct sk_buff_head *skbs) 1541a4450980SMordechay Goodstein { 1542a4450980SMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[txq_id]; 1543a4450980SMordechay Goodstein int tfd_num = iwl_txq_get_cmd_index(txq, ssn); 1544a4450980SMordechay Goodstein int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); 1545a4450980SMordechay Goodstein int last_to_free; 1546a4450980SMordechay Goodstein 1547a4450980SMordechay Goodstein /* This function is not meant to release cmd queue*/ 1548a4450980SMordechay Goodstein if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) 1549a4450980SMordechay Goodstein return; 1550a4450980SMordechay Goodstein 1551a4450980SMordechay Goodstein spin_lock_bh(&txq->lock); 1552a4450980SMordechay Goodstein 1553a4450980SMordechay Goodstein if (!test_bit(txq_id, trans->txqs.queue_used)) { 1554a4450980SMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 1555a4450980SMordechay Goodstein txq_id, ssn); 1556a4450980SMordechay Goodstein goto out; 1557a4450980SMordechay Goodstein } 1558a4450980SMordechay 
Goodstein 1559a4450980SMordechay Goodstein if (read_ptr == tfd_num) 1560a4450980SMordechay Goodstein goto out; 1561a4450980SMordechay Goodstein 1562a4450980SMordechay Goodstein IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 1563a4450980SMordechay Goodstein txq_id, txq->read_ptr, tfd_num, ssn); 1564a4450980SMordechay Goodstein 1565a4450980SMordechay Goodstein /*Since we free until index _not_ inclusive, the one before index is 1566a4450980SMordechay Goodstein * the last we will free. This one must be used */ 1567a4450980SMordechay Goodstein last_to_free = iwl_txq_dec_wrap(trans, tfd_num); 1568a4450980SMordechay Goodstein 1569a4450980SMordechay Goodstein if (!iwl_txq_used(txq, last_to_free)) { 1570a4450980SMordechay Goodstein IWL_ERR(trans, 1571a4450980SMordechay Goodstein "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1572a4450980SMordechay Goodstein __func__, txq_id, last_to_free, 1573a4450980SMordechay Goodstein trans->trans_cfg->base_params->max_tfd_queue_size, 1574a4450980SMordechay Goodstein txq->write_ptr, txq->read_ptr); 15759cd3de81SMordechay Goodstein 15769cd3de81SMordechay Goodstein iwl_op_mode_time_point(trans->op_mode, 15779cd3de81SMordechay Goodstein IWL_FW_INI_TIME_POINT_FAKE_TX, 15789cd3de81SMordechay Goodstein NULL); 1579a4450980SMordechay Goodstein goto out; 1580a4450980SMordechay Goodstein } 1581a4450980SMordechay Goodstein 1582a4450980SMordechay Goodstein if (WARN_ON(!skb_queue_empty(skbs))) 1583a4450980SMordechay Goodstein goto out; 1584a4450980SMordechay Goodstein 1585a4450980SMordechay Goodstein for (; 1586a4450980SMordechay Goodstein read_ptr != tfd_num; 1587a4450980SMordechay Goodstein txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), 1588a4450980SMordechay Goodstein read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { 1589a4450980SMordechay Goodstein struct sk_buff *skb = txq->entries[read_ptr].skb; 1590a4450980SMordechay Goodstein 1591a4450980SMordechay Goodstein if (WARN_ON_ONCE(!skb)) 
1592a4450980SMordechay Goodstein continue; 1593a4450980SMordechay Goodstein 1594a4450980SMordechay Goodstein iwl_txq_free_tso_page(trans, skb); 1595a4450980SMordechay Goodstein 1596a4450980SMordechay Goodstein __skb_queue_tail(skbs, skb); 1597a4450980SMordechay Goodstein 1598a4450980SMordechay Goodstein txq->entries[read_ptr].skb = NULL; 1599a4450980SMordechay Goodstein 1600a4450980SMordechay Goodstein if (!trans->trans_cfg->use_tfh) 1601a4450980SMordechay Goodstein iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); 1602a4450980SMordechay Goodstein 1603a4450980SMordechay Goodstein iwl_txq_free_tfd(trans, txq); 1604a4450980SMordechay Goodstein } 1605a4450980SMordechay Goodstein 1606a4450980SMordechay Goodstein iwl_txq_progress(txq); 1607a4450980SMordechay Goodstein 1608a4450980SMordechay Goodstein if (iwl_txq_space(trans, txq) > txq->low_mark && 1609a4450980SMordechay Goodstein test_bit(txq_id, trans->txqs.queue_stopped)) { 1610a4450980SMordechay Goodstein struct sk_buff_head overflow_skbs; 1611a4450980SMordechay Goodstein 1612a4450980SMordechay Goodstein __skb_queue_head_init(&overflow_skbs); 1613a4450980SMordechay Goodstein skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); 1614a4450980SMordechay Goodstein 1615a4450980SMordechay Goodstein /* 1616a4450980SMordechay Goodstein * We are going to transmit from the overflow queue. 1617a4450980SMordechay Goodstein * Remember this state so that wait_for_txq_empty will know we 1618a4450980SMordechay Goodstein * are adding more packets to the TFD queue. It cannot rely on 1619a4450980SMordechay Goodstein * the state of &txq->overflow_q, as we just emptied it, but 1620a4450980SMordechay Goodstein * haven't TXed the content yet. 
1621a4450980SMordechay Goodstein */ 1622a4450980SMordechay Goodstein txq->overflow_tx = true; 1623a4450980SMordechay Goodstein 1624a4450980SMordechay Goodstein /* 1625a4450980SMordechay Goodstein * This is tricky: we are in reclaim path which is non 1626a4450980SMordechay Goodstein * re-entrant, so noone will try to take the access the 1627a4450980SMordechay Goodstein * txq data from that path. We stopped tx, so we can't 1628a4450980SMordechay Goodstein * have tx as well. Bottom line, we can unlock and re-lock 1629a4450980SMordechay Goodstein * later. 1630a4450980SMordechay Goodstein */ 1631a4450980SMordechay Goodstein spin_unlock_bh(&txq->lock); 1632a4450980SMordechay Goodstein 1633a4450980SMordechay Goodstein while (!skb_queue_empty(&overflow_skbs)) { 1634a4450980SMordechay Goodstein struct sk_buff *skb = __skb_dequeue(&overflow_skbs); 1635a4450980SMordechay Goodstein struct iwl_device_tx_cmd *dev_cmd_ptr; 1636a4450980SMordechay Goodstein 1637a4450980SMordechay Goodstein dev_cmd_ptr = *(void **)((u8 *)skb->cb + 1638a4450980SMordechay Goodstein trans->txqs.dev_cmd_offs); 1639a4450980SMordechay Goodstein 1640a4450980SMordechay Goodstein /* 1641a4450980SMordechay Goodstein * Note that we can very well be overflowing again. 1642a4450980SMordechay Goodstein * In that case, iwl_txq_space will be small again 1643a4450980SMordechay Goodstein * and we won't wake mac80211's queue. 
1644a4450980SMordechay Goodstein */ 1645a4450980SMordechay Goodstein iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); 1646a4450980SMordechay Goodstein } 1647a4450980SMordechay Goodstein 1648a4450980SMordechay Goodstein if (iwl_txq_space(trans, txq) > txq->low_mark) 1649a4450980SMordechay Goodstein iwl_wake_queue(trans, txq); 1650a4450980SMordechay Goodstein 1651a4450980SMordechay Goodstein spin_lock_bh(&txq->lock); 1652a4450980SMordechay Goodstein txq->overflow_tx = false; 1653a4450980SMordechay Goodstein } 1654a4450980SMordechay Goodstein 1655a4450980SMordechay Goodstein out: 1656a4450980SMordechay Goodstein spin_unlock_bh(&txq->lock); 1657a4450980SMordechay Goodstein } 1658a4450980SMordechay Goodstein 1659a4450980SMordechay Goodstein /* Set wr_ptr of specific device and txq */ 1660a4450980SMordechay Goodstein void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) 1661a4450980SMordechay Goodstein { 1662a4450980SMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[txq_id]; 1663a4450980SMordechay Goodstein 1664a4450980SMordechay Goodstein spin_lock_bh(&txq->lock); 1665a4450980SMordechay Goodstein 1666a4450980SMordechay Goodstein txq->write_ptr = ptr; 1667a4450980SMordechay Goodstein txq->read_ptr = txq->write_ptr; 1668a4450980SMordechay Goodstein 1669a4450980SMordechay Goodstein spin_unlock_bh(&txq->lock); 1670a4450980SMordechay Goodstein } 1671a4450980SMordechay Goodstein 1672a4450980SMordechay Goodstein void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, 1673a4450980SMordechay Goodstein bool freeze) 1674a4450980SMordechay Goodstein { 1675a4450980SMordechay Goodstein int queue; 1676a4450980SMordechay Goodstein 1677a4450980SMordechay Goodstein for_each_set_bit(queue, &txqs, BITS_PER_LONG) { 1678a4450980SMordechay Goodstein struct iwl_txq *txq = trans->txqs.txq[queue]; 1679a4450980SMordechay Goodstein unsigned long now; 1680a4450980SMordechay Goodstein 1681a4450980SMordechay Goodstein spin_lock_bh(&txq->lock); 
1682a4450980SMordechay Goodstein 1683a4450980SMordechay Goodstein now = jiffies; 1684a4450980SMordechay Goodstein 1685a4450980SMordechay Goodstein if (txq->frozen == freeze) 1686a4450980SMordechay Goodstein goto next_queue; 1687a4450980SMordechay Goodstein 1688a4450980SMordechay Goodstein IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", 1689a4450980SMordechay Goodstein freeze ? "Freezing" : "Waking", queue); 1690a4450980SMordechay Goodstein 1691a4450980SMordechay Goodstein txq->frozen = freeze; 1692a4450980SMordechay Goodstein 1693a4450980SMordechay Goodstein if (txq->read_ptr == txq->write_ptr) 1694a4450980SMordechay Goodstein goto next_queue; 1695a4450980SMordechay Goodstein 1696a4450980SMordechay Goodstein if (freeze) { 1697a4450980SMordechay Goodstein if (unlikely(time_after(now, 1698a4450980SMordechay Goodstein txq->stuck_timer.expires))) { 1699a4450980SMordechay Goodstein /* 1700a4450980SMordechay Goodstein * The timer should have fired, maybe it is 1701a4450980SMordechay Goodstein * spinning right now on the lock. 
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

/* How long to wait (in jiffies) for a synchronous host command to complete */
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

/*
 * iwl_trans_txq_send_hcmd_sync - send a host command and wait for completion
 * @trans: the transport to send the command through
 * @cmd: the host command to send
 *
 * Marks STATUS_SYNC_HCMD_ACTIVE, enqueues the command via the transport ops
 * and sleeps on trans->wait_command_queue until the response handler clears
 * the bit, or until HOST_COMPLETE_TIMEOUT expires.
 *
 * Returns 0 on success; -EIO if a sync command is already active, if the FW
 * hit an error, or if a CMD_WANT_SKB command got no response packet;
 * -ETIMEDOUT on timeout; -ERFKILL if rfkill was asserted while waiting (and
 * the command isn't CMD_SEND_IN_RFKILL); or the enqueue error code.
 *
 * On any failure after enqueue, the CMD_WANT_SKB flag is cancelled on the
 * queued entry and any already-received response is freed.
 */
static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	/* Only one synchronous command may be in flight at a time */
	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	cmd_idx = trans->ops->send_cmd(trans, cmd);
	if (cmd_idx < 0) {
		/* enqueue failed - undo the ACTIVE bit we just set */
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	/* the response handler clears STATUS_SYNC_HCMD_ACTIVE and wakes us */
	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		/* trigger an NMI so the FW state gets dumped for debugging */
		iwl_trans_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	/* rfkill hit while we were waiting - no response will come */
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	/* caller asked for the response packet but none was attached */
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	/* free any response that did arrive before the failure was detected */
	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

/*
 * iwl_trans_txq_send_hcmd - send a host command to the device
 * @trans: the transport to send the command through
 * @cmd: the host command to send
 *
 * Entry point for host commands: validates the transport state (device
 * alive, rfkill, D3 suspend), then either enqueues an async command and
 * returns immediately, or delegates to iwl_trans_txq_send_hcmd_sync() to
 * wait for completion.
 *
 * Returns 0 on success, -ENODEV if the device is gone, -ERFKILL if rfkill
 * is asserted (unless CMD_SEND_IN_RFKILL), -EHOSTDOWN in D3 (unless
 * CMD_SEND_IN_D3), -EINVAL for an async command requesting a response SKB,
 * or the error from the enqueue/sync path.
 */
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
			    struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3))) {
		IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
		return -EHOSTDOWN;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = trans->ops->send_cmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_trans_txq_send_hcmd_sync(trans, cmd);
}