// SPDX-License-Identifier: GPL-2.0-or-later
/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skb buffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we need at least 3 bytes of headroom for moving
	 * the frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes of headroom available for the IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate the skb buffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Reserve the headroom and set the frame length so that the
	 * requested bytes are available at both the head and the tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

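	/*
	 * Drivers that DMA the received frame straight into this buffer need
	 * the skb mapped up front; record the DMA handle and flag it in the
	 * frame descriptor so rt2x00queue_unmap_skb() can unmap it later.
	 */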
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	rt2x00lib_dmadone(entry);
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

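/*
 * Align the frame payload to a 4-byte boundary: push the alignment offset
 * returned by ALIGN_SIZE() into the headroom, move the frame data down to
 * the new (aligned) start and restore the original frame length.
 */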
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

/*
 * H/W needs L2 padding between the header and the payload if the header
 * size is not 4-byte aligned.
 */
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	skb_push(skb, l2pad);
	memmove(skb->data, skb->data + l2pad, hdr_len);
}

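/*
 * Undo rt2x00queue_insert_l2pad(): move the header back up over the L2 pad
 * bytes and pull the pad from the frame, restoring the original layout.
 */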
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, hdr_len);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS) and
		 * management frames. To work around the problem, generate
		 * the seqno in software, except for beacons, which are
		 * transmitted periodically by the H/W and hence must have
		 * their seqno assigned by the hardware.
		 */
		if (ieee80211_is_beacon(hdr->frame_control)) {
			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
			/* H/W will generate the sequence number */
			return;
		}

		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
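	/*
	 * The sequence control field carries the 4-bit fragment number in
	 * its low bits and the 12-bit sequence number above it, which is
	 * why the counter is advanced in steps of 0x10 and only bumped on
	 * the first fragment of a frame.
	 */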
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						   struct sk_buff *skb,
						   struct txentry_desc *txdesc,
						   const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

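	/*
	 * The descriptor splits the PLCP length over two fields: for OFDM
	 * rates it holds the frame length in octets (low/high 6 bits), for
	 * CCK rates the frame duration in microseconds (low/high 8 bits).
	 */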
	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->deflink.ht_cap.ampdu_density;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings at zero. */
		return;
	}

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
	}

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked that the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb for DMA.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out, but just don't
	 * track it in our bar list. As a result we will report it back
	 * to mac80211 as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * such that we can use RCU for less overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert the BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bottom halves disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(),
	 * so we do this under queue->tx_lock. Bottom halves were already
	 * disabled before the ieee80211_xmit() call.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear the beacon (single bssid devices don't need to clear the
	 * beacon since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	/*
	 * Send the beacon to the hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
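	/*
	 * The entry array is used as a ring buffer, so when the range wraps
	 * around the end of the array, walk it in two parts: from
	 * index_start up to the queue limit, and then from 0 to index_end.
	 */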
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

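	/*
	 * Q_INDEX advances when a new entry is queued and Q_INDEX_DONE when
	 * an entry completes, so 'length' tracks the number of entries
	 * currently in flight and 'count' the total number completed.
	 */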
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

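	/*
	 * Mark the queue as paused before starting it, so that the
	 * rt2x00queue_unpause_queue() call below is guaranteed to perform
	 * the actual wake-up (it only acts when QUEUE_PAUSED was set).
	 */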
	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * of the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if the driver supports flushing, in which case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

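/*
 * The single allocation above holds all struct queue_entry objects first,
 * followed by each entry's driver private data block, i.e. the private
 * data for entry i lives at base + limit * sizeof(entry) + i * priv_size.
 */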
111633aca94dSKalle Valo #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
111733aca94dSKalle Valo (((char *)(__base)) + ((__limit) * (__esize)) + \
111833aca94dSKalle Valo ((__index) * (__psize)))
111933aca94dSKalle Valo
112033aca94dSKalle Valo for (i = 0; i < queue->limit; i++) {
112133aca94dSKalle Valo entries[i].flags = 0;
112233aca94dSKalle Valo entries[i].queue = queue;
112333aca94dSKalle Valo entries[i].skb = NULL;
112433aca94dSKalle Valo entries[i].entry_idx = i;
112533aca94dSKalle Valo entries[i].priv_data =
112633aca94dSKalle Valo QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
112733aca94dSKalle Valo sizeof(*entries), queue->priv_size);
112833aca94dSKalle Valo }
112933aca94dSKalle Valo
113033aca94dSKalle Valo #undef QUEUE_ENTRY_PRIV_OFFSET
113133aca94dSKalle Valo
113233aca94dSKalle Valo queue->entries = entries;
113333aca94dSKalle Valo
113433aca94dSKalle Valo return 0;
113533aca94dSKalle Valo }
113633aca94dSKalle Valo
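/*
 * Free the skb, if any, still attached to each entry of the queue.
 */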
113733aca94dSKalle Valo static void rt2x00queue_free_skbs(struct data_queue *queue)
113833aca94dSKalle Valo {
113933aca94dSKalle Valo unsigned int i;
114033aca94dSKalle Valo
114133aca94dSKalle Valo if (!queue->entries)
114233aca94dSKalle Valo return;
114333aca94dSKalle Valo
114433aca94dSKalle Valo for (i = 0; i < queue->limit; i++) {
114533aca94dSKalle Valo rt2x00queue_free_skb(&queue->entries[i]);
114633aca94dSKalle Valo }
114733aca94dSKalle Valo }
114833aca94dSKalle Valo
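/*
 * Attach a freshly allocated receive skb to every entry of the RX queue.
 * On failure the skbs allocated so far are left in place; the caller is
 * expected to unwind through rt2x00queue_uninitialize(), which frees them.
 */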
114933aca94dSKalle Valo static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
115033aca94dSKalle Valo {
115133aca94dSKalle Valo unsigned int i;
115233aca94dSKalle Valo struct sk_buff *skb;
115333aca94dSKalle Valo
115433aca94dSKalle Valo for (i = 0; i < queue->limit; i++) {
115533aca94dSKalle Valo skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
115633aca94dSKalle Valo if (!skb)
115733aca94dSKalle Valo return -ENOMEM;
115833aca94dSKalle Valo queue->entries[i].skb = skb;
115933aca94dSKalle Valo }
116033aca94dSKalle Valo
116133aca94dSKalle Valo return 0;
116233aca94dSKalle Valo }
116333aca94dSKalle Valo
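/*
 * Allocate the entry arrays for every queue (RX, all TX queues, beacon
 * and, when the hardware requires it, atim) and pre-fill the RX queue
 * with skbs. Any failure rolls back all allocations made so far.
 */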
116433aca94dSKalle Valo int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
116533aca94dSKalle Valo {
116633aca94dSKalle Valo struct data_queue *queue;
116733aca94dSKalle Valo int status;
116833aca94dSKalle Valo
116933aca94dSKalle Valo status = rt2x00queue_alloc_entries(rt2x00dev->rx);
117033aca94dSKalle Valo if (status)
117133aca94dSKalle Valo goto exit;
117233aca94dSKalle Valo
117333aca94dSKalle Valo tx_queue_for_each(rt2x00dev, queue) {
117433aca94dSKalle Valo status = rt2x00queue_alloc_entries(queue);
117533aca94dSKalle Valo if (status)
117633aca94dSKalle Valo goto exit;
117733aca94dSKalle Valo }
117833aca94dSKalle Valo
117933aca94dSKalle Valo status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
118033aca94dSKalle Valo if (status)
118133aca94dSKalle Valo goto exit;
118233aca94dSKalle Valo
118333aca94dSKalle Valo if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
118433aca94dSKalle Valo status = rt2x00queue_alloc_entries(rt2x00dev->atim);
118533aca94dSKalle Valo if (status)
118633aca94dSKalle Valo goto exit;
118733aca94dSKalle Valo }
118833aca94dSKalle Valo
118933aca94dSKalle Valo status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
119033aca94dSKalle Valo if (status)
119133aca94dSKalle Valo goto exit;
119233aca94dSKalle Valo
119333aca94dSKalle Valo return 0;
119433aca94dSKalle Valo
119533aca94dSKalle Valo exit:
119633aca94dSKalle Valo rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
119733aca94dSKalle Valo
119833aca94dSKalle Valo rt2x00queue_uninitialize(rt2x00dev);
119933aca94dSKalle Valo
120033aca94dSKalle Valo return status;
120133aca94dSKalle Valo }
120233aca94dSKalle Valo
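/*
 * Counterpart of rt2x00queue_initialize(): release the RX skbs and the
 * entry arrays of every queue.
 */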
120333aca94dSKalle Valo void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
120433aca94dSKalle Valo {
120533aca94dSKalle Valo struct data_queue *queue;
120633aca94dSKalle Valo
120733aca94dSKalle Valo rt2x00queue_free_skbs(rt2x00dev->rx);
120833aca94dSKalle Valo
120933aca94dSKalle Valo queue_for_each(rt2x00dev, queue) {
121033aca94dSKalle Valo kfree(queue->entries);
121133aca94dSKalle Valo queue->entries = NULL;
121233aca94dSKalle Valo }
121333aca94dSKalle Valo }
121433aca94dSKalle Valo
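/*
 * Initialize the locks and generic parameters of a single queue and let
 * the driver fill in its hardware specific limits via ops->queue_init().
 * The cw_min/cw_max values are exponents, see rt2x00queue_allocate().
 */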
121533aca94dSKalle Valo static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
121633aca94dSKalle Valo struct data_queue *queue, enum data_queue_qid qid)
121733aca94dSKalle Valo {
121833aca94dSKalle Valo mutex_init(&queue->status_lock);
121933aca94dSKalle Valo spin_lock_init(&queue->tx_lock);
122033aca94dSKalle Valo spin_lock_init(&queue->index_lock);
122133aca94dSKalle Valo
122233aca94dSKalle Valo queue->rt2x00dev = rt2x00dev;
122333aca94dSKalle Valo queue->qid = qid;
122433aca94dSKalle Valo queue->txop = 0;
122533aca94dSKalle Valo queue->aifs = 2;
122633aca94dSKalle Valo queue->cw_min = 5;
122733aca94dSKalle Valo queue->cw_max = 10;
122833aca94dSKalle Valo
122933aca94dSKalle Valo rt2x00dev->ops->queue_init(queue);
123033aca94dSKalle Valo
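	/*
	 * Note: the threshold below marks the point (roughly 10% of the
	 * entries left) at which the queue is considered almost full;
	 * callers such as the TX path use it to pause the queue in time.
	 */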
123133aca94dSKalle Valo queue->threshold = DIV_ROUND_UP(queue->limit, 10);
123233aca94dSKalle Valo }
123333aca94dSKalle Valo
123433aca94dSKalle Valo int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
123533aca94dSKalle Valo {
123633aca94dSKalle Valo struct data_queue *queue;
123733aca94dSKalle Valo enum data_queue_qid qid;
123833aca94dSKalle Valo unsigned int req_atim =
123933aca94dSKalle Valo rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
124033aca94dSKalle Valo
124133aca94dSKalle Valo /*
124233aca94dSKalle Valo * We need the following queues:
124333aca94dSKalle Valo * RX: 1
124433aca94dSKalle Valo * TX: ops->tx_queues
124533aca94dSKalle Valo * Beacon: 1
124633aca94dSKalle Valo * Atim: 1 (if required)
124733aca94dSKalle Valo */
124833aca94dSKalle Valo rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
124933aca94dSKalle Valo
125033aca94dSKalle Valo queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1251cd7c0cdaSMarkus Elfring if (!queue)
125233aca94dSKalle Valo return -ENOMEM;
125333aca94dSKalle Valo
125433aca94dSKalle Valo /*
125533aca94dSKalle Valo * Initialize pointers
125633aca94dSKalle Valo */
125733aca94dSKalle Valo rt2x00dev->rx = queue;
125833aca94dSKalle Valo rt2x00dev->tx = &queue[1];
125933aca94dSKalle Valo rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
126033aca94dSKalle Valo rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
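	/*
	 * Resulting layout of the queue array (atim only when required):
	 *   queue[0]                  RX
	 *   queue[1 .. tx_queues]     TX, one per access category
	 *   queue[1 + tx_queues]      beacon
	 *   queue[2 + tx_queues]      atim
	 */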
126133aca94dSKalle Valo
126233aca94dSKalle Valo /*
126333aca94dSKalle Valo * Initialize queue parameters.
126433aca94dSKalle Valo * RX: qid = QID_RX
126533aca94dSKalle Valo * TX: qid = QID_AC_VO + index
126633aca94dSKalle Valo * TX: cw_min: 2^5 = 32.
126733aca94dSKalle Valo * TX: cw_max: 2^10 = 1024.
126833aca94dSKalle Valo * BCN: qid = QID_BEACON
126933aca94dSKalle Valo * ATIM: qid = QID_ATIM
127033aca94dSKalle Valo */
127133aca94dSKalle Valo rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
127233aca94dSKalle Valo
127333aca94dSKalle Valo qid = QID_AC_VO;
127433aca94dSKalle Valo tx_queue_for_each(rt2x00dev, queue)
127533aca94dSKalle Valo rt2x00queue_init(rt2x00dev, queue, qid++);
127633aca94dSKalle Valo
127733aca94dSKalle Valo rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
127833aca94dSKalle Valo if (req_atim)
127933aca94dSKalle Valo rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
128033aca94dSKalle Valo
128133aca94dSKalle Valo return 0;
128233aca94dSKalle Valo }
128333aca94dSKalle Valo
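/*
 * Counterpart of rt2x00queue_allocate(): all queues were allocated as a
 * single array anchored at rt2x00dev->rx, so a single kfree() releases
 * them all.
 */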
128433aca94dSKalle Valo void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
128533aca94dSKalle Valo {
128633aca94dSKalle Valo kfree(rt2x00dev->rx);
128733aca94dSKalle Valo rt2x00dev->rx = NULL;
128833aca94dSKalle Valo rt2x00dev->tx = NULL;
128933aca94dSKalle Valo rt2x00dev->bcn = NULL;
129033aca94dSKalle Valo }