xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/tx.c (revision 95e9fd10)
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

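/*
 * Reserve a free entry in the tx_frames table for this skb and record the
 * mapping, so the Tx-completion path can recover the skb from the id the
 * firmware reports back. Returns -EBUSY when all descriptors are in use.
 */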
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/*
	 * Add the station to the known list before transmitting the
	 * authentication response, so the FW doesn't de-auth it for
	 * transmitting too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}

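/*
 * Per-link Tx policing for AP mode: if the station is in FW power save and
 * already has enough packets pending in FW memory, move the link into
 * host-managed (high-level) PS so its queues can be drained from FW memory.
 */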
static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps, single_sta;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;
	single_sta = (wl->active_sta_count == 1);

	/*
	 * If the link is in FW PS and has enough data pending in the FW, we
	 * can put it into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected station; in that
	 * case FW-memory congestion is not a problem.
	 */
	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

126 
127 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
128 {
129 	return wl->dummy_packet == skb;
130 }
131 EXPORT_SYMBOL(wl12xx_is_dummy_packet);
132 
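/*
 * Map an AP-mode skb to a host link id (HLID): the station's own link when
 * mac80211 handed us a station, otherwise the broadcast or global link (or
 * the system link while the AP is not yet started).
 */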
u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			 struct sk_buff *skb)
{
	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);

	if (control->control.sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)
				control->control.sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}

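/*
 * Generic HLID selection: dummy packets and vif-less frames go out on the
 * system link; AP vifs defer to the AP helper above; STA/IBSS vifs use the
 * station link once associated/joined, except for auth and assoc-request
 * frames, which still use the device link.
 */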
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
		return wl->system_hlid;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);

	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
	    !ieee80211_is_auth(hdr->frame_control) &&
	    !ieee80211_is_assoc_req(hdr->frame_control))
		return wlvif->sta.hlid;
	else
		return wlvif->dev_hlid;
}

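/*
 * Pad the packet length up to the unit the firmware expects: the bus block
 * size when block-size alignment is required, otherwise the basic Tx
 * alignment (WL1271_TX_ALIGN_TO). See the comment in
 * wl1271_prepare_tx_frame() for the wl128x/SDIO case.
 */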
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

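/*
 * Reserve resources for one frame: a descriptor id plus the HW memory
 * blocks needed for the frame. Returns -EAGAIN when the aggregation buffer
 * is full, -EBUSY when no descriptor id or not enough blocks are free.
 */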
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate a free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    test_bit(hlid, wlvif->ap.sta_hlid_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

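/*
 * Fill in the HW Tx descriptor in front of the frame: packet lifetime,
 * queue/TID, session counter, HLID and the rate-policy index the firmware
 * should use for this frame.
 */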
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for the security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different from the one set in the
		 * join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		/* configure the tx attributes */
		tx_attr = wlvif->session_counter <<
			  TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * Send data packets with the AP rate policies (EAPOLs are an
		 * exception); otherwise use the default basic rates.
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE))
			/* send AP bcast and EAPOLs using the min basic rate */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no FW encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	/* TODO: handle dummy packets on multi-vifs */
	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}
	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of words. Thus, we
	 * must pad the skb data to make sure its length is aligned. The
	 * number of padding bytes is computed and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size (e.g.
	 * for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}

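/*
 * Translate a mac80211 rate-set bitmap for the given band into the
 * firmware's CONF_HW_BIT_RATE_* bitmap; legacy rates come first, MCS rates
 * start at bit HW_HT_RATES_OFFSET of the input set.
 */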
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rate indications are on bits 16-31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

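/*
 * Restart any queue that was stopped on the watermark reason once its
 * pending count drops back to the low watermark.
 */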
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (wlcore_is_queue_stopped_by_reason(wl, i,
			WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
			/* firmware buffer has space, restart queues */
			wlcore_wake_queue(wl, i,
					  WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}

static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
						struct sk_buff_head *queues)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty AC where:
	 * 1. There are packets to transmit
	 * 2. The FW has the fewest allocated packets
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (!skb_queue_empty(&queues[ac]) &&
		    (wl->tx_allocated_pkts[ac] < min_pkts)) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	if (q == -1)
		return NULL;

	return &queues[q];
}

static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
					      struct wl1271_link *lnk)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct sk_buff_head *queue;

	queue = wl1271_select_queue(wl, lnk->tx_queue);
	if (!queue)
		return NULL;

	skb = skb_dequeue(queue);
	if (skb) {
		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
		h = (start_hlid + i) % WL12XX_MAX_LINKS;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	return skb;
}

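/*
 * Global dequeue order: continue the round robin from the last served vif,
 * then try the system link, then do a fresh pass over all vifs, and
 * finally fall back to a pending dummy packet if one was requested.
 */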
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb)
		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);

	/* do a new pass over the wlvif list */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

			/*
			 * No need to continue after last_wlvif. The previous
			 * pass should have found it.
			 */
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

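/*
 * Put an skb back at the head of its queue after a failed Tx attempt and
 * rewind last_tx_hlid so the same packet is dequeued first next time.
 */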
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
				      WL12XX_MAX_LINKS;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

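/*
 * If Rx streaming is enabled in the configuration, (re)arm the
 * Rx-streaming timer for every STA vif that owns one of the active Tx
 * links, kicking the enable work if streaming is not yet started.
 */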
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

/*
 * Returns failure values only in case of failed bus ops within this
 * function. wl1271_prepare_tx_frame retvals are not returned, in order to
 * avoid triggering recovery by higher layers when it is not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery will be
 * queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame can
 * occur and are legitimate, so they are not propagated. -EINVAL will emit
 * a WARNING within the prepare_tx_frame code, but there is nothing we
 * should do about those either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
			wlvif = wl12xx_vif_to_data(info->control.vif);

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
					     wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					     buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions.
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
					     wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}

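/*
 * Handle a single Tx result from the FW: update mac80211 status (ACK,
 * rate, retries), track the security sequence number, strip the private
 * headers, and hand the skb back to the stack via the deferred queue.
 */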
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);

void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= total[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
			wl1271_free_sta(wl, wlvif, i);
		else
			wlvif->sta.ba_rx_bitmap = 0;

		wl->links[i].allocated_pkts = 0;
		wl->links[i].prev_freed_pkts = 0;
	}
	wlvif->last_tx_hlid = 0;
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout;
	int i;

	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		mutex_lock(&wl->mutex);
		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));
		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			mutex_unlock(&wl->mutex);
			goto out;
		}
		mutex_unlock(&wl->mutex);
		msleep(1);
	}

	wl1271_warning("Unable to flush all TX buffers, timed out.");

	/* forcibly flush all Tx buffers on our queues */
	mutex_lock(&wl->mutex);
	for (i = 0; i < WL12XX_MAX_LINKS; i++)
		wl1271_tx_reset_link_queues(wl, i);
	mutex_unlock(&wl->mutex);

out:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);

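/* Return the lowest rate present in the rate set, as a single-bit mask. */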
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}

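/*
 * Queue stop/wake management: each mac80211 queue carries a bitmask of
 * stop reasons. Stopping sets a reason bit (the queue is only stopped in
 * mac80211 on the first reason); waking clears it, and the queue is only
 * woken in mac80211 once all reasons are clear.
 */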
void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
			      enum wlcore_queue_stop_reason reason)
{
	bool stopped = !!wl->queue_stop_reasons[queue];

	/* queue should not already be stopped for this reason */
	WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
}

void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not already be clear of this reason */
	WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));

	if (wl->queue_stop_reasons[queue])
		goto out;

	ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlcore_stop_queue(wl, i, reason);
}
EXPORT_SYMBOL_GPL(wlcore_stop_queues);

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlcore_wake_queue(wl, i, reason);
}
EXPORT_SYMBOL_GPL(wlcore_wake_queues);

void wlcore_reset_stopped_queues(struct wl1271 *wl)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (!wl->queue_stop_reasons[i])
			continue;

		wl->queue_stop_reasons[i] = 0;
		ieee80211_wake_queue(wl->hw,
				     wl1271_tx_get_mac80211_queue(i));
	}

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
			     enum wlcore_queue_stop_reason reason)
{
	return test_bit(reason, &wl->queue_stop_reasons[queue]);
}

bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
{
	return !!wl->queue_stop_reasons[queue];
}