1 /*
2  * This file is part of wl1271
3  *
4  * Copyright (C) 2009 Nokia Corporation
5  *
6  * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/etherdevice.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/spinlock.h>
29 
30 #include "wlcore.h"
31 #include "debug.h"
32 #include "io.h"
33 #include "ps.h"
34 #include "tx.h"
35 #include "event.h"
36 #include "hw_ops.h"
37 
38 /*
39  * TODO: this is here just for now, it must be removed when the data
40  * operations are in place.
41  */
42 #include "../wl12xx/reg.h"
43 
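/*
 * Tell the FW which WEP key index is the default for this vif
 * (broadcast link in AP mode, station link otherwise).
 */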
44 static int wl1271_set_default_wep_key(struct wl1271 *wl,
45 				      struct wl12xx_vif *wlvif, u8 id)
46 {
47 	int ret;
48 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
49 
50 	if (is_ap)
51 		ret = wl12xx_cmd_set_default_wep_key(wl, id,
52 						     wlvif->ap.bcast_hlid);
53 	else
54 		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
55 
56 	if (ret < 0)
57 		return ret;
58 
59 	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
60 	return 0;
61 }
62 
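/*
 * Reserve a free TX descriptor id for @skb, or return -EBUSY when all
 * descriptors are in use.
 */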
63 static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
64 {
65 	int id;
66 
67 	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
68 	if (id >= wl->num_tx_desc)
69 		return -EBUSY;
70 
71 	__set_bit(id, wl->tx_frames_map);
72 	wl->tx_frames[id] = skb;
73 	wl->tx_frames_cnt++;
74 	return id;
75 }
76 
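/*
 * Release a TX descriptor id. Clears the FW-busy flag when a slot is
 * freed while all descriptors were in use.
 */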
77 void wl1271_free_tx_id(struct wl1271 *wl, int id)
78 {
79 	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
80 		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
81 			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
82 
83 		wl->tx_frames[id] = NULL;
84 		wl->tx_frames_cnt--;
85 	}
86 }
87 EXPORT_SYMBOL(wl1271_free_tx_id);
88 
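/*
 * AP mode: when transmitting an authentication reply, pre-add the peer
 * as an in-connection station and keep the channel open until the
 * connection completes (or the pending-auth work times out).
 */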
89 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
90 						 struct wl12xx_vif *wlvif,
91 						 struct sk_buff *skb)
92 {
93 	struct ieee80211_hdr *hdr;
94 
95 	hdr = (struct ieee80211_hdr *)(skb->data +
96 				       sizeof(struct wl1271_tx_hw_descr));
97 	if (!ieee80211_is_auth(hdr->frame_control))
98 		return;
99 
100 	/*
101 	 * Add the station to the known list before transmitting the
102 	 * authentication response. This way it won't get de-authed by the
103 	 * FW when transmitting too soon.
104 	 */
105 	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
106 
107 	/*
108 	 * ROC for 1 second on the AP channel for completing the connection.
109 	 * Note the ROC will be continued by the update_sta_state callbacks
110 	 * once the station reaches the associated state.
111 	 */
112 	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
113 	wlvif->pending_auth_reply_time = jiffies;
114 	cancel_delayed_work(&wlvif->pending_auth_complete_work);
115 	ieee80211_queue_delayed_work(wl->hw,
116 				&wlvif->pending_auth_complete_work,
117 				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
118 }
119 
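/*
 * Move a busy link into high-level PS when the FW is already buffering
 * enough of its packets (see the comment below for the exact conditions).
 */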
120 static void wl1271_tx_regulate_link(struct wl1271 *wl,
121 				    struct wl12xx_vif *wlvif,
122 				    u8 hlid)
123 {
124 	bool fw_ps;
125 	u8 tx_pkts;
126 
127 	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
128 		return;
129 
130 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
131 	tx_pkts = wl->links[hlid].allocated_pkts;
132 
133 	/*
134 	 * If the link is in FW PS and enough data is buffered in the FW, we
135 	 * can put the link into high-level PS and clean out its TX queues.
136 	 * Make an exception if this is the only connected link. In this
137 	 * case FW-memory congestion is less of a problem.
138 	 * Note that a single connected STA means 2*ap_count + 1 active links,
139 	 * since we must account for the global and broadcast AP links
140 	 * for each AP. The "fw_ps" check assures us the other link is a STA
141 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
142 	 */
143 	if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
144 	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
145 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
146 }
147 
148 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
149 {
150 	return wl->dummy_packet == skb;
151 }
152 EXPORT_SYMBOL(wl12xx_is_dummy_packet);
153 
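/*
 * AP mode: pick the host link id for an skb - the station's own link when
 * known, the broadcast link for multicast frames, the global link for the
 * rest, and the system link before the AP has started.
 */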
154 static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
155 				struct sk_buff *skb, struct ieee80211_sta *sta)
156 {
157 	if (sta) {
158 		struct wl1271_station *wl_sta;
159 
160 		wl_sta = (struct wl1271_station *)sta->drv_priv;
161 		return wl_sta->hlid;
162 	} else {
163 		struct ieee80211_hdr *hdr;
164 
165 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
166 			return wl->system_hlid;
167 
168 		hdr = (struct ieee80211_hdr *)skb->data;
169 		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
170 			return wlvif->ap.bcast_hlid;
171 		else
172 			return wlvif->ap.global_hlid;
173 	}
174 }
175 
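/*
 * Resolve the host link id (hlid) a frame should be sent on: AP links via
 * the helper above, the device link for off-channel TX, or the station link.
 */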
176 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
177 		      struct sk_buff *skb, struct ieee80211_sta *sta)
178 {
179 	struct ieee80211_tx_info *control;
180 
181 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
182 		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
183 
184 	control = IEEE80211_SKB_CB(skb);
185 	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
186 		wl1271_debug(DEBUG_TX, "tx offchannel");
187 		return wlvif->dev_hlid;
188 	}
189 
190 	return wlvif->sta.hlid;
191 }
192 
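/*
 * Round the packet length up to either the TX alignment or the bus block
 * size, depending on the HW quirks.
 */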
193 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
194 					  unsigned int packet_length)
195 {
196 	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
197 	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
198 		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
199 	else
200 		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
201 }
202 EXPORT_SYMBOL(wlcore_calc_packet_alignment);
203 
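/*
 * Reserve a descriptor id and FW memory blocks for @skb. Returns -EAGAIN
 * when the aggregation buffer is full and -EBUSY when the FW has no free
 * descriptors or blocks.
 */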
204 static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
205 			      struct sk_buff *skb, u32 extra, u32 buf_offset,
206 			      u8 hlid, bool is_gem)
207 {
208 	struct wl1271_tx_hw_descr *desc;
209 	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
210 	u32 total_blocks;
211 	int id, ret = -EBUSY, ac;
212 	u32 spare_blocks;
213 
214 	if (buf_offset + total_len > wl->aggr_buf_size)
215 		return -EAGAIN;
216 
217 	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
218 
219 	/* allocate free identifier for the packet */
220 	id = wl1271_alloc_tx_id(wl, skb);
221 	if (id < 0)
222 		return id;
223 
224 	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
225 
226 	if (total_blocks <= wl->tx_blocks_available) {
227 		desc = skb_push(skb, total_len - skb->len);
228 
229 		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
230 					     spare_blocks);
231 
232 		desc->id = id;
233 
234 		wl->tx_blocks_available -= total_blocks;
235 		wl->tx_allocated_blocks += total_blocks;
236 
237 		/*
238 		 * If the FW was empty before, arm the Tx watchdog. Also do
239 		 * this on the first Tx after resume, as we always cancel the
240 		 * watchdog on suspend.
241 		 */
242 		if (wl->tx_allocated_blocks == total_blocks ||
243 		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
244 			wl12xx_rearm_tx_watchdog_locked(wl);
245 
246 		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
247 		wl->tx_allocated_pkts[ac]++;
248 
249 		if (test_bit(hlid, wl->links_map))
250 			wl->links[hlid].allocated_pkts++;
251 
252 		ret = 0;
253 
254 		wl1271_debug(DEBUG_TX,
255 			     "tx_allocate: size: %d, blocks: %d, id: %d",
256 			     total_len, total_blocks, id);
257 	} else {
258 		wl1271_free_tx_id(wl, id);
259 	}
260 
261 	return ret;
262 }
263 
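/*
 * Fill in the HW TX descriptor in front of the frame (lifetime, TID,
 * session counter, rate policy, checksum and length fields) and make room
 * for the security header when needed.
 */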
264 static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
265 			       struct sk_buff *skb, u32 extra,
266 			       struct ieee80211_tx_info *control, u8 hlid)
267 {
268 	struct wl1271_tx_hw_descr *desc;
269 	int ac, rate_idx;
270 	s64 hosttime;
271 	u16 tx_attr = 0;
272 	__le16 frame_control;
273 	struct ieee80211_hdr *hdr;
274 	u8 *frame_start;
275 	bool is_dummy;
276 
277 	desc = (struct wl1271_tx_hw_descr *) skb->data;
278 	frame_start = (u8 *)(desc + 1);
279 	hdr = (struct ieee80211_hdr *)(frame_start + extra);
280 	frame_control = hdr->frame_control;
281 
282 	/* relocate the 802.11 header to make room for the security header */
283 	if (extra) {
284 		int hdrlen = ieee80211_hdrlen(frame_control);
285 		memmove(frame_start, hdr, hdrlen);
286 		skb_set_network_header(skb, skb_network_offset(skb) + extra);
287 	}
288 
289 	/* configure packet lifetime */
290 	hosttime = (ktime_get_boot_ns() >> 10);
291 	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
292 
293 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
294 	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
295 		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
296 	else
297 		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
298 
299 	/* queue */
300 	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
301 	desc->tid = skb->priority;
302 
303 	if (is_dummy) {
304 		/*
305 		 * The FW expects the dummy packet to have an invalid session id,
306 		 * i.e. any session id different from the one set in the join.
307 		 */
308 		tx_attr = (SESSION_COUNTER_INVALID <<
309 			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
310 			   TX_HW_ATTR_SESSION_COUNTER;
311 
312 		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
313 	} else if (wlvif) {
314 		u8 session_id = wl->session_ids[hlid];
315 
316 		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
317 		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
318 			session_id = 0;
319 
320 		/* configure the tx attributes */
321 		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
322 	}
323 
324 	desc->hlid = hlid;
325 	if (is_dummy || !wlvif)
326 		rate_idx = 0;
327 	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
328 		/*
329 		 * If the packets are data packets, send them with the AP rate
330 		 * policy (EAPOLs are an exception); otherwise use the default
331 		 * basic rates.
332 		 */
333 		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
334 			rate_idx = wlvif->sta.basic_rate_idx;
335 		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
336 			rate_idx = wlvif->sta.p2p_rate_idx;
337 		else if (ieee80211_is_data(frame_control))
338 			rate_idx = wlvif->sta.ap_rate_idx;
339 		else
340 			rate_idx = wlvif->sta.basic_rate_idx;
341 	} else {
342 		if (hlid == wlvif->ap.global_hlid)
343 			rate_idx = wlvif->ap.mgmt_rate_idx;
344 		else if (hlid == wlvif->ap.bcast_hlid ||
345 			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
346 			 !ieee80211_is_data(frame_control))
347 			/*
348 			 * send non-data, bcast and EAPOLs using the
349 			 * min basic rate
350 			 */
351 			rate_idx = wlvif->ap.bcast_rate_idx;
352 		else
353 			rate_idx = wlvif->ap.ucast_rate_idx[ac];
354 	}
355 
356 	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
357 
358 	/* for WEP shared auth - no fw encryption is needed */
359 	if (ieee80211_is_auth(frame_control) &&
360 	    ieee80211_has_protected(frame_control))
361 		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
362 
363 	/* send EAPOL frames as voice */
364 	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
365 		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
366 
367 	desc->tx_attr = cpu_to_le16(tx_attr);
368 
369 	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
370 	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
371 }
372 
373 /* caller must hold wl->mutex */
374 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
375 				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
376 {
377 	struct ieee80211_tx_info *info;
378 	u32 extra = 0;
379 	int ret = 0;
380 	u32 total_len;
381 	bool is_dummy;
382 	bool is_gem = false;
383 
384 	if (!skb) {
385 		wl1271_error("discarding null skb");
386 		return -EINVAL;
387 	}
388 
389 	if (hlid == WL12XX_INVALID_LINK_ID) {
390 		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
391 		return -EINVAL;
392 	}
393 
394 	info = IEEE80211_SKB_CB(skb);
395 
396 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
397 
398 	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
399 	    info->control.hw_key &&
400 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
401 		extra = WL1271_EXTRA_SPACE_TKIP;
402 
403 	if (info->control.hw_key) {
404 		bool is_wep;
405 		u8 idx = info->control.hw_key->hw_key_idx;
406 		u32 cipher = info->control.hw_key->cipher;
407 
408 		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
409 			 (cipher == WLAN_CIPHER_SUITE_WEP104);
410 
411 		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
412 			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
413 			if (ret < 0)
414 				return ret;
415 			wlvif->default_key = idx;
416 		}
417 
418 		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
419 	}
420 
421 	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
422 				 is_gem);
423 	if (ret < 0)
424 		return ret;
425 
426 	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
427 
428 	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
429 		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
430 		wl1271_tx_regulate_link(wl, wlvif, hlid);
431 	}
432 
433 	/*
434 	 * The length of each packet is stored in terms of
435 	 * words. Thus, we must pad the skb data to make sure its
436 	 * length is aligned.  The number of padding bytes is computed
437 	 * and set in wl1271_tx_fill_hdr.
438 	 * In special cases, we want to align to a specific block size
439 	 * (e.g. for wl128x with SDIO we align to 256).
440 	 */
441 	total_len = wlcore_calc_packet_alignment(wl, skb->len);
442 
443 	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
444 	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
445 
446 	/* Revert side effects in the dummy packet skb, so it can be reused */
447 	if (is_dummy)
448 		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
449 
450 	return total_len;
451 }
452 
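/*
 * Translate a mac80211 rate-set bitmap into the HW rate bitmap used by
 * the FW: legacy rates first, then up to 16 MCS rates.
 */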
453 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
454 				enum nl80211_band rate_band)
455 {
456 	struct ieee80211_supported_band *band;
457 	u32 enabled_rates = 0;
458 	int bit;
459 
460 	band = wl->hw->wiphy->bands[rate_band];
461 	for (bit = 0; bit < band->n_bitrates; bit++) {
462 		if (rate_set & 0x1)
463 			enabled_rates |= band->bitrates[bit].hw_value;
464 		rate_set >>= 1;
465 	}
466 
467 	/* MCS rate indications are on bits 16 - 31 */
468 	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
469 
470 	for (bit = 0; bit < 16; bit++) {
471 		if (rate_set & 0x1)
472 			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
473 		rate_set >>= 1;
474 	}
475 
476 	return enabled_rates;
477 }
478 
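/*
 * Wake any queue that was stopped for the watermark reason once its
 * driver queue has drained below the low watermark.
 */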
479 void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
480 {
481 	int i;
482 	struct wl12xx_vif *wlvif;
483 
484 	wl12xx_for_each_wlvif(wl, wlvif) {
485 		for (i = 0; i < NUM_TX_QUEUES; i++) {
486 			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
487 					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
488 			    wlvif->tx_queue_count[i] <=
489 					WL1271_TX_QUEUE_LOW_WATERMARK)
490 				/* firmware buffer has space, restart queues */
491 				wlcore_wake_queue(wl, wlvif, i,
492 					WLCORE_QUEUE_STOP_REASON_WATERMARK);
493 		}
494 	}
495 }
496 
497 static int wlcore_select_ac(struct wl1271 *wl)
498 {
499 	int i, q = -1, ac;
500 	u32 min_pkts = 0xffffffff;
501 
502 	/*
503 	 * Find a non-empty ac where:
504 	 * 1. There are packets to transmit
505 	 * 2. The FW has the least allocated packets
506 	 *
507 	 * We prioritize the ACs according to VO>VI>BE>BK
508 	 */
509 	for (i = 0; i < NUM_TX_QUEUES; i++) {
510 		ac = wl1271_tx_get_queue(i);
511 		if (wl->tx_queue_count[ac] &&
512 		    wl->tx_allocated_pkts[ac] < min_pkts) {
513 			q = ac;
514 			min_pkts = wl->tx_allocated_pkts[q];
515 		}
516 	}
517 
518 	return q;
519 }
520 
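/*
 * Pop one skb from the link's per-AC queue and decrement the global and
 * per-vif queue counters.
 */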
521 static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
522 					  struct wl1271_link *lnk, u8 q)
523 {
524 	struct sk_buff *skb;
525 	unsigned long flags;
526 
527 	skb = skb_dequeue(&lnk->tx_queue[q]);
528 	if (skb) {
529 		spin_lock_irqsave(&wl->wl_lock, flags);
530 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
531 		wl->tx_queue_count[q]--;
532 		if (lnk->wlvif) {
533 			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
534 			lnk->wlvif->tx_queue_count[q]--;
535 		}
536 		spin_unlock_irqrestore(&wl->wl_lock, flags);
537 	}
538 
539 	return skb;
540 }
541 
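/*
 * Dequeue from @hlid only if the FW rates the link high priority;
 * otherwise record the first non-empty low-priority link so the caller
 * can fall back to it.
 */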
542 static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
543 						    u8 hlid, u8 ac,
544 						    u8 *low_prio_hlid)
545 {
546 	struct wl1271_link *lnk = &wl->links[hlid];
547 
548 	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
549 		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
550 		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
551 		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
552 			/* we found the first non-empty low priority queue */
553 			*low_prio_hlid = hlid;
554 
555 		return NULL;
556 	}
557 
558 	return wlcore_lnk_dequeue(wl, lnk, ac);
559 }
560 
561 static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
562 						    struct wl12xx_vif *wlvif,
563 						    u8 ac, u8 *hlid,
564 						    u8 *low_prio_hlid)
565 {
566 	struct sk_buff *skb = NULL;
567 	int i, h, start_hlid;
568 
569 	/* start from the link after the last one */
570 	start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
571 
572 	/* dequeue according to AC, round robin on each link */
573 	for (i = 0; i < wl->num_links; i++) {
574 		h = (start_hlid + i) % wl->num_links;
575 
576 		/* only consider connected stations */
577 		if (!test_bit(h, wlvif->links_map))
578 			continue;
579 
580 		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
581 						   low_prio_hlid);
582 		if (!skb)
583 			continue;
584 
585 		wlvif->last_tx_hlid = h;
586 		break;
587 	}
588 
589 	if (!skb)
590 		wlvif->last_tx_hlid = 0;
591 
592 	*hlid = wlvif->last_tx_hlid;
593 	return skb;
594 }
595 
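/*
 * Pick the next skb to transmit: select an AC, then round-robin over the
 * vifs and their links (the system link included), falling back to the
 * first non-empty low-priority link and finally to a pending dummy packet.
 */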
596 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
597 {
598 	unsigned long flags;
599 	struct wl12xx_vif *wlvif = wl->last_wlvif;
600 	struct sk_buff *skb = NULL;
601 	int ac;
602 	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;
603 
604 	ac = wlcore_select_ac(wl);
605 	if (ac < 0)
606 		goto out;
607 
608 	/* continue from last wlvif (round robin) */
609 	if (wlvif) {
610 		wl12xx_for_each_wlvif_continue(wl, wlvif) {
611 			if (!wlvif->tx_queue_count[ac])
612 				continue;
613 
614 			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
615 							   &low_prio_hlid);
616 			if (!skb)
617 				continue;
618 
619 			wl->last_wlvif = wlvif;
620 			break;
621 		}
622 	}
623 
624 	/* dequeue from the system HLID before restarting the wlvif list */
625 	if (!skb) {
626 		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
627 						   ac, &low_prio_hlid);
628 		if (skb) {
629 			*hlid = wl->system_hlid;
630 			wl->last_wlvif = NULL;
631 		}
632 	}
633 
634 	/* Do a new pass over the wlvif list. But no need to continue
635 	 * after last_wlvif. The previous pass should have found it. */
636 	if (!skb) {
637 		wl12xx_for_each_wlvif(wl, wlvif) {
638 			if (!wlvif->tx_queue_count[ac])
639 				goto next;
640 
641 			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
642 							   &low_prio_hlid);
643 			if (skb) {
644 				wl->last_wlvif = wlvif;
645 				break;
646 			}
647 
648 next:
649 			if (wlvif == wl->last_wlvif)
650 				break;
651 		}
652 	}
653 
654 	/* no high priority skbs found - but maybe a low priority one? */
655 	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
656 		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
657 		skb = wlcore_lnk_dequeue(wl, lnk, ac);
658 
659 		WARN_ON(!skb); /* we checked this before */
660 		*hlid = low_prio_hlid;
661 
662 		/* ensure proper round robin in the vif/link levels */
663 		wl->last_wlvif = lnk->wlvif;
664 		if (lnk->wlvif)
665 			lnk->wlvif->last_tx_hlid = low_prio_hlid;
666 
667 	}
668 
669 out:
670 	if (!skb &&
671 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
672 		int q;
673 
674 		skb = wl->dummy_packet;
675 		*hlid = wl->system_hlid;
676 		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
677 		spin_lock_irqsave(&wl->wl_lock, flags);
678 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
679 		wl->tx_queue_count[q]--;
680 		spin_unlock_irqrestore(&wl->wl_lock, flags);
681 	}
682 
683 	return skb;
684 }
685 
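/*
 * Put an skb back at the head of its link queue (or re-flag the pending
 * dummy packet) so it is the first one dequeued on the next pass, e.g.
 * when the aggregation or FW buffer is full.
 */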
686 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
687 				  struct sk_buff *skb, u8 hlid)
688 {
689 	unsigned long flags;
690 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
691 
692 	if (wl12xx_is_dummy_packet(wl, skb)) {
693 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
694 	} else {
695 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
696 
697 		/* make sure we dequeue the same packet next time */
698 		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
699 				      wl->num_links;
700 	}
701 
702 	spin_lock_irqsave(&wl->wl_lock, flags);
703 	wl->tx_queue_count[q]++;
704 	if (wlvif)
705 		wlvif->tx_queue_count[q]++;
706 	spin_unlock_irqrestore(&wl->wl_lock, flags);
707 }
708 
709 static bool wl1271_tx_is_data_present(struct sk_buff *skb)
710 {
711 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
712 
713 	return ieee80211_is_data_present(hdr->frame_control);
714 }
715 
716 void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
717 {
718 	struct wl12xx_vif *wlvif;
719 	u32 timeout;
720 	u8 hlid;
721 
722 	if (!wl->conf.rx_streaming.interval)
723 		return;
724 
725 	if (!wl->conf.rx_streaming.always &&
726 	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
727 		return;
728 
729 	timeout = wl->conf.rx_streaming.duration;
730 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
731 		bool found = false;
732 		for_each_set_bit(hlid, active_hlids, wl->num_links) {
733 			if (test_bit(hlid, wlvif->links_map)) {
734 				found  = true;
735 				break;
736 			}
737 		}
738 
739 		if (!found)
740 			continue;
741 
742 		/* enable rx streaming */
743 		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
744 			ieee80211_queue_work(wl->hw,
745 					     &wlvif->rx_streaming_enable_work);
746 
747 		mod_timer(&wlvif->rx_streaming_timer,
748 			  jiffies + msecs_to_jiffies(timeout));
749 	}
750 }
751 
752 /*
753  * Returns failure values only in case of failed bus ops within this function.
754  * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
755  * triggering recovery by higher layers when not necessary.
756  * In case a FW command fails within wl1271_prepare_tx_frame, a recovery
757  * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
758  * can occur and are legitimate, so don't propagate them. -EINVAL will
759  * emit a WARNING within the prepare_tx_frame code, but there's nothing we
760  * should do about those either.
761  */
762 int wlcore_tx_work_locked(struct wl1271 *wl)
763 {
764 	struct wl12xx_vif *wlvif;
765 	struct sk_buff *skb;
766 	struct wl1271_tx_hw_descr *desc;
767 	u32 buf_offset = 0, last_len = 0;
768 	bool sent_packets = false;
769 	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
770 	int ret = 0;
771 	int bus_ret = 0;
772 	u8 hlid;
773 
774 	if (unlikely(wl->state != WLCORE_STATE_ON))
775 		return 0;
776 
777 	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
778 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
779 		bool has_data = false;
780 
781 		wlvif = NULL;
782 		if (!wl12xx_is_dummy_packet(wl, skb))
783 			wlvif = wl12xx_vif_to_data(info->control.vif);
784 		else
785 			hlid = wl->system_hlid;
786 
787 		has_data = wlvif && wl1271_tx_is_data_present(skb);
788 		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
789 					      hlid);
790 		if (ret == -EAGAIN) {
791 			/*
792 			 * Aggregation buffer is full.
793 			 * Flush buffer and try again.
794 			 */
795 			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
796 
797 			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
798 							    last_len);
799 			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
800 					     wl->aggr_buf, buf_offset, true);
801 			if (bus_ret < 0)
802 				goto out;
803 
804 			sent_packets = true;
805 			buf_offset = 0;
806 			continue;
807 		} else if (ret == -EBUSY) {
808 			/*
809 			 * Firmware buffer is full.
810 			 * Queue back last skb, and stop aggregating.
811 			 */
812 			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
813 			/* No work left, avoid scheduling redundant tx work */
814 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
815 			goto out_ack;
816 		} else if (ret < 0) {
817 			if (wl12xx_is_dummy_packet(wl, skb))
818 				/*
819 				 * fw still expects dummy packet,
820 				 * so re-enqueue it
821 				 */
822 				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
823 			else
824 				ieee80211_free_txskb(wl->hw, skb);
825 			goto out_ack;
826 		}
827 		last_len = ret;
828 		buf_offset += last_len;
829 		wl->tx_packets_count++;
830 		if (has_data) {
831 			desc = (struct wl1271_tx_hw_descr *) skb->data;
832 			__set_bit(desc->hlid, active_hlids);
833 		}
834 	}
835 
836 out_ack:
837 	if (buf_offset) {
838 		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
839 		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
840 					     buf_offset, true);
841 		if (bus_ret < 0)
842 			goto out;
843 
844 		sent_packets = true;
845 	}
846 	if (sent_packets) {
847 		/*
848 		 * Interrupt the firmware with the new packets. This is only
849 		 * required for older hardware revisions.
850 		 */
851 		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
852 			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
853 					     wl->tx_packets_count);
854 			if (bus_ret < 0)
855 				goto out;
856 		}
857 
858 		wl1271_handle_tx_low_watermark(wl);
859 	}
860 	wl12xx_rearm_rx_streaming(wl, active_hlids);
861 
862 out:
863 	return bus_ret;
864 }
865 
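/*
 * TX work item: resume the device, run one locked TX pass and let runtime
 * PM autosuspend again; a bus error triggers recovery.
 */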
866 void wl1271_tx_work(struct work_struct *work)
867 {
868 	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
869 	int ret;
870 
871 	mutex_lock(&wl->mutex);
872 	ret = pm_runtime_get_sync(wl->dev);
873 	if (ret < 0) {
874 		pm_runtime_put_noidle(wl->dev);
875 		goto out;
876 	}
877 
878 	ret = wlcore_tx_work_locked(wl);
879 	if (ret < 0) {
880 		wl12xx_queue_recovery_work(wl);
881 		goto out;
882 	}
883 
884 	pm_runtime_mark_last_busy(wl->dev);
885 	pm_runtime_put_autosuspend(wl->dev);
886 out:
887 	mutex_unlock(&wl->mutex);
888 }
889 
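/*
 * Convert a FW rate class index into mac80211 rate flags (MCS, short GI).
 */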
890 static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
891 {
892 	u8 flags = 0;
893 
894 	/*
895 	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
896 	 * only it uses Tx-completion.
897 	 */
898 	if (rate_class_index <= 8)
899 		flags |= IEEE80211_TX_RC_MCS;
900 
905 	if (rate_class_index == 0)
906 		flags |= IEEE80211_TX_RC_SHORT_GI;
907 
908 	return flags;
909 }
910 
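/*
 * Handle a single TX result: fill in the mac80211 TX status and hand the
 * skb back to the stack through the deferred queue.
 */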
911 static void wl1271_tx_complete_packet(struct wl1271 *wl,
912 				      struct wl1271_tx_hw_res_descr *result)
913 {
914 	struct ieee80211_tx_info *info;
915 	struct ieee80211_vif *vif;
916 	struct wl12xx_vif *wlvif;
917 	struct sk_buff *skb;
918 	int id = result->id;
919 	int rate = -1;
920 	u8 rate_flags = 0;
921 	u8 retries = 0;
922 
923 	/* check for id legality */
924 	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
925 		wl1271_warning("TX result illegal id: %d", id);
926 		return;
927 	}
928 
929 	skb = wl->tx_frames[id];
930 	info = IEEE80211_SKB_CB(skb);
931 
932 	if (wl12xx_is_dummy_packet(wl, skb)) {
933 		wl1271_free_tx_id(wl, id);
934 		return;
935 	}
936 
937 	/* info->control is valid as long as we don't update info->status */
938 	vif = info->control.vif;
939 	wlvif = wl12xx_vif_to_data(vif);
940 
941 	/* update the TX status info */
942 	if (result->status == TX_SUCCESS) {
943 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
944 			info->flags |= IEEE80211_TX_STAT_ACK;
945 		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
946 					  wlvif->band);
947 		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
948 		retries = result->ack_failures;
949 	} else if (result->status == TX_RETRY_EXCEEDED) {
950 		wl->stats.excessive_retries++;
951 		retries = result->ack_failures;
952 	}
953 
954 	info->status.rates[0].idx = rate;
955 	info->status.rates[0].count = retries;
956 	info->status.rates[0].flags = rate_flags;
957 	info->status.ack_signal = -1;
958 
959 	wl->stats.retry_count += result->ack_failures;
960 
961 	/* remove private header from packet */
962 	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
963 
964 	/* remove TKIP header space if present */
965 	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
966 	    info->control.hw_key &&
967 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
968 		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
969 		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
970 			hdrlen);
971 		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
972 	}
973 
974 	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
975 		     " status 0x%x",
976 		     result->id, skb, result->ack_failures,
977 		     result->rate_class_index, result->status);
978 
979 	/* return the packet to the stack */
980 	skb_queue_tail(&wl->deferred_tx_queue, skb);
981 	queue_work(wl->freezable_wq, &wl->netstack_work);
982 	wl1271_free_tx_id(wl, result->id);
983 }
984 
985 /* Called upon reception of a TX complete interrupt */
986 int wlcore_tx_complete(struct wl1271 *wl)
987 {
988 	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
989 	u32 count, fw_counter;
990 	u32 i;
991 	int ret;
992 
993 	/* read the tx results from the chipset */
994 	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
995 			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
996 	if (ret < 0)
997 		goto out;
998 
999 	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
1000 
1001 	/* write host counter to chipset (to ack) */
1002 	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
1003 			     offsetof(struct wl1271_tx_hw_res_if,
1004 				      tx_result_host_counter), fw_counter);
1005 	if (ret < 0)
1006 		goto out;
1007 
1008 	count = fw_counter - wl->tx_results_count;
1009 	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
1010 
1011 	/* verify that the result buffer is not getting overrun */
1012 	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
1013 		wl1271_warning("TX result overflow from chipset: %d", count);
1014 
1015 	/* process the results */
1016 	for (i = 0; i < count; i++) {
1017 		struct wl1271_tx_hw_res_descr *result;
1018 		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
1019 
1020 		/* process the packet */
1021 		result =  &(wl->tx_res_if->tx_results_queue[offset]);
1022 		wl1271_tx_complete_packet(wl, result);
1023 
1024 		wl->tx_results_count++;
1025 	}
1026 
1027 out:
1028 	return ret;
1029 }
1030 EXPORT_SYMBOL(wlcore_tx_complete);
1031 
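/*
 * Flush all queued skbs of a link, reporting them to mac80211 without an
 * ACK, and update the queue counters.
 */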
1032 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
1033 {
1034 	struct sk_buff *skb;
1035 	int i;
1036 	unsigned long flags;
1037 	struct ieee80211_tx_info *info;
1038 	int total[NUM_TX_QUEUES];
1039 	struct wl1271_link *lnk = &wl->links[hlid];
1040 
1041 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1042 		total[i] = 0;
1043 		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
1044 			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
1045 
1046 			if (!wl12xx_is_dummy_packet(wl, skb)) {
1047 				info = IEEE80211_SKB_CB(skb);
1048 				info->status.rates[0].idx = -1;
1049 				info->status.rates[0].count = 0;
1050 				ieee80211_tx_status_ni(wl->hw, skb);
1051 			}
1052 
1053 			total[i]++;
1054 		}
1055 	}
1056 
1057 	spin_lock_irqsave(&wl->wl_lock, flags);
1058 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1059 		wl->tx_queue_count[i] -= total[i];
1060 		if (lnk->wlvif)
1061 			lnk->wlvif->tx_queue_count[i] -= total[i];
1062 	}
1063 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1064 
1065 	wl1271_handle_tx_low_watermark(wl);
1066 }
1067 
1068 /* caller must hold wl->mutex and TX must be stopped */
1069 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1070 {
1071 	int i;
1072 
1073 	/* TX failure */
1074 	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
1075 		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
1076 		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
1077 			/* this calls wl12xx_free_link */
1078 			wl1271_free_sta(wl, wlvif, i);
1079 		} else {
1080 			u8 hlid = i;
1081 			wl12xx_free_link(wl, wlvif, &hlid);
1082 		}
1083 	}
1084 	wlvif->last_tx_hlid = 0;
1085 
1086 	for (i = 0; i < NUM_TX_QUEUES; i++)
1087 		wlvif->tx_queue_count[i] = 0;
1088 }
1089 /* caller must hold wl->mutex and TX must be stopped */
1090 void wl12xx_tx_reset(struct wl1271 *wl)
1091 {
1092 	int i;
1093 	struct sk_buff *skb;
1094 	struct ieee80211_tx_info *info;
1095 
1096 	/* only reset the queues if something bad happened */
1097 	if (wl1271_tx_total_queue_count(wl) != 0) {
1098 		for (i = 0; i < wl->num_links; i++)
1099 			wl1271_tx_reset_link_queues(wl, i);
1100 
1101 		for (i = 0; i < NUM_TX_QUEUES; i++)
1102 			wl->tx_queue_count[i] = 0;
1103 	}
1104 
1105 	/*
1106 	 * Make sure the driver is at a consistent state, in case this
1107 	 * function is called from a context other than interface removal.
1108 	 * This call will always wake the TX queues.
1109 	 */
1110 	wl1271_handle_tx_low_watermark(wl);
1111 
1112 	for (i = 0; i < wl->num_tx_desc; i++) {
1113 		if (wl->tx_frames[i] == NULL)
1114 			continue;
1115 
1116 		skb = wl->tx_frames[i];
1117 		wl1271_free_tx_id(wl, i);
1118 		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
1119 
1120 		if (!wl12xx_is_dummy_packet(wl, skb)) {
1121 			/*
1122 			 * Remove private headers before passing the skb to
1123 			 * mac80211
1124 			 */
1125 			info = IEEE80211_SKB_CB(skb);
1126 			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
1127 			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
1128 			    info->control.hw_key &&
1129 			    info->control.hw_key->cipher ==
1130 			    WLAN_CIPHER_SUITE_TKIP) {
1131 				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1132 				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
1133 					skb->data, hdrlen);
1134 				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
1135 			}
1136 
1137 			info->status.rates[0].idx = -1;
1138 			info->status.rates[0].count = 0;
1139 
1140 			ieee80211_tx_status_ni(wl->hw, skb);
1141 		}
1142 	}
1143 }
1144 
1145 #define WL1271_TX_FLUSH_TIMEOUT 500000
1146 
1147 /* caller must *NOT* hold wl->mutex */
1148 void wl1271_tx_flush(struct wl1271 *wl)
1149 {
1150 	unsigned long timeout, start_time;
1151 	int i;
1152 	start_time = jiffies;
1153 	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
1154 
1155 	/* only one flush should be in progress, for consistent queue state */
1156 	mutex_lock(&wl->flush_mutex);
1157 
1158 	mutex_lock(&wl->mutex);
1159 	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
1160 		mutex_unlock(&wl->mutex);
1161 		goto out;
1162 	}
1163 
1164 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1165 
1166 	while (!time_after(jiffies, timeout)) {
1167 		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
1168 			     wl->tx_frames_cnt,
1169 			     wl1271_tx_total_queue_count(wl));
1170 
1171 		/* force Tx and give the driver some time to flush data */
1172 		mutex_unlock(&wl->mutex);
1173 		if (wl1271_tx_total_queue_count(wl))
1174 			wl1271_tx_work(&wl->tx_work);
1175 		msleep(20);
1176 		mutex_lock(&wl->mutex);
1177 
1178 		if ((wl->tx_frames_cnt == 0) &&
1179 		    (wl1271_tx_total_queue_count(wl) == 0)) {
1180 			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
1181 				     jiffies_to_msecs(jiffies - start_time));
1182 			goto out_wake;
1183 		}
1184 	}
1185 
1186 	wl1271_warning("Unable to flush all TX buffers, "
1187 		       "timed out (timeout %d ms)",
1188 		       WL1271_TX_FLUSH_TIMEOUT / 1000);
1189 
1190 	/* forcibly flush all Tx buffers on our queues */
1191 	for (i = 0; i < wl->num_links; i++)
1192 		wl1271_tx_reset_link_queues(wl, i);
1193 
1194 out_wake:
1195 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1196 	mutex_unlock(&wl->mutex);
1197 out:
1198 	mutex_unlock(&wl->flush_mutex);
1199 }
1200 EXPORT_SYMBOL_GPL(wl1271_tx_flush);
1201 
1202 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
1203 {
1204 	if (WARN_ON(!rate_set))
1205 		return 0;
1206 
1207 	return BIT(__ffs(rate_set));
1208 }
1209 EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
1210 
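/*
 * Mark @queue stopped for @reason; the mac80211 queue is only actually
 * stopped when the first reason is set.
 */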
1211 void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1212 			      u8 queue, enum wlcore_queue_stop_reason reason)
1213 {
1214 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1215 	bool stopped = !!wl->queue_stop_reasons[hwq];
1216 
1217 	/* queue should not be stopped for this reason */
1218 	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
1219 
1220 	if (stopped)
1221 		return;
1222 
1223 	ieee80211_stop_queue(wl->hw, hwq);
1224 }
1225 
1226 void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
1227 		       enum wlcore_queue_stop_reason reason)
1228 {
1229 	unsigned long flags;
1230 
1231 	spin_lock_irqsave(&wl->wl_lock, flags);
1232 	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
1233 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1234 }
1235 
1236 void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
1237 		       enum wlcore_queue_stop_reason reason)
1238 {
1239 	unsigned long flags;
1240 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1241 
1242 	spin_lock_irqsave(&wl->wl_lock, flags);
1243 
1244 	/* queue should not be clear for this reason */
1245 	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
1246 
1247 	if (wl->queue_stop_reasons[hwq])
1248 		goto out;
1249 
1250 	ieee80211_wake_queue(wl->hw, hwq);
1251 
1252 out:
1253 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1254 }
1255 
1256 void wlcore_stop_queues(struct wl1271 *wl,
1257 			enum wlcore_queue_stop_reason reason)
1258 {
1259 	int i;
1260 	unsigned long flags;
1261 
1262 	spin_lock_irqsave(&wl->wl_lock, flags);
1263 
1264 	/* mark all possible queues as stopped */
1265 	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
1266 		WARN_ON_ONCE(test_and_set_bit(reason,
1267 					      &wl->queue_stop_reasons[i]));
1268 
1269 	/* use the global version to make sure all vifs in mac80211 that we
1270 	 * don't know about are stopped.
1271 	 */
1272 	ieee80211_stop_queues(wl->hw);
1273 
1274 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1275 }
1276 
1277 void wlcore_wake_queues(struct wl1271 *wl,
1278 			enum wlcore_queue_stop_reason reason)
1279 {
1280 	int i;
1281 	unsigned long flags;
1282 
1283 	spin_lock_irqsave(&wl->wl_lock, flags);
1284 
1285 	/* mark all possible queues as awake */
1286 	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
1287 		WARN_ON_ONCE(!test_and_clear_bit(reason,
1288 						 &wl->queue_stop_reasons[i]));
1289 
1290 	/* use the global version to make sure all vifs in mac80211 that we
1291 	 * don't know about are woken up.
1292 	 */
1293 	ieee80211_wake_queues(wl->hw);
1294 
1295 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1296 }
1297 
1298 bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
1299 				       struct wl12xx_vif *wlvif, u8 queue,
1300 				       enum wlcore_queue_stop_reason reason)
1301 {
1302 	unsigned long flags;
1303 	bool stopped;
1304 
1305 	spin_lock_irqsave(&wl->wl_lock, flags);
1306 	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
1307 							   reason);
1308 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1309 
1310 	return stopped;
1311 }
1312 
1313 bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
1314 				       struct wl12xx_vif *wlvif, u8 queue,
1315 				       enum wlcore_queue_stop_reason reason)
1316 {
1317 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1318 
1319 	assert_spin_locked(&wl->wl_lock);
1320 	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
1321 }
1322 
1323 bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1324 				    u8 queue)
1325 {
1326 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1327 
1328 	assert_spin_locked(&wl->wl_lock);
1329 	return !!wl->queue_stop_reasons[hwq];
1330 }
1331