// SPDX-License-Identifier: GPL-2.0-only
/*
 * Data transmitting implementation.
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <net/mac80211.h>
#include <linux/etherdevice.h>

#include "data_tx.h"
#include "wfx.h"
#include "bh.h"
#include "sta.h"
#include "queue.h"
#include "debug.h"
#include "traces.h"
#include "hif_tx_mib.h"

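/* Translate a mac80211 ieee80211_tx_rate into the hardware rate index: legacy rates use the
 * hw_value from the 2.4GHz bitrate table, while MCS0-7 map to hardware indices 14-21. Returns -1
 * if the rate entry is unused or out of range.
 */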
static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate)
{
	struct ieee80211_supported_band *band;

	if (rate->idx < 0)
		return -1;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		if (rate->idx > 7) {
			WARN(1, "wrong rate->idx value: %d", rate->idx);
			return -1;
		}
		return rate->idx + 14;
	}
	/* The device only supports the 2.4GHz band. Otherwise, the band information would have
	 * to be retrieved from ieee80211_tx_info.
	 */
	band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
	if (rate->idx >= band->n_bitrates) {
		WARN(1, "wrong rate->idx value: %d", rate->idx);
		return -1;
	}
	return band->bitrates[rate->idx].hw_value;
}

/* TX policy cache implementation */

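/* policy->rates stores one 4-bit retry count per hardware rate: even hardware rate indices use
 * the low nibble and odd indices the high nibble of rates[rateid / 2].
 */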
static void wfx_tx_policy_build(struct wfx_vif *wvif, struct wfx_tx_policy *policy,
				struct ieee80211_tx_rate *rates)
{
	struct wfx_dev *wdev = wvif->wdev;
	int i, rateid;
	u8 count;

	WARN(rates[0].idx < 0, "invalid rate policy");
	memset(policy, 0, sizeof(*policy));
	for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
		if (rates[i].idx < 0)
			break;
		WARN_ON(rates[i].count > 15);
		rateid = wfx_get_hw_rate(wdev, &rates[i]);
		/* Pack two values in each byte of policy->rates */
		count = rates[i].count;
		if (rateid % 2)
			count <<= 4;
		policy->rates[rateid / 2] |= count;
	}
}

static bool wfx_tx_policy_is_equal(const struct wfx_tx_policy *a, const struct wfx_tx_policy *b)
{
	return !memcmp(a->rates, b->rates, sizeof(a->rates));
}

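/* Look for an identical policy in both the "used" and "free" lists. The returned value is the
 * index of the matching entry in the cache->cache array, or -1 if no match exists.
 */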
static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted)
{
	struct wfx_tx_policy *it;

	list_for_each_entry(it, &cache->used, link)
		if (wfx_tx_policy_is_equal(wanted, it))
			return it - cache->cache;
	list_for_each_entry(it, &cache->free, link)
		if (wfx_tx_policy_is_equal(wanted, it))
			return it - cache->cache;
	return -1;
}

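/* Policy entries are reference counted: an entry with a non-zero usage_count lives on the "used"
 * list and moves back to the "free" list when its last user releases it.
 */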
static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
{
	++entry->usage_count;
	list_move(&entry->link, &cache->used);
}

static int wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
{
	int ret = --entry->usage_count;

	if (!ret)
		list_move(&entry->link, &cache->free);
	return ret;
}

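/* Get (or create) the policy index matching the requested rate set. When the last free entry is
 * consumed, the mac80211 queues are stopped until wfx_tx_policy_put() releases an entry again.
 */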
static int wfx_tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates, bool *renew)
{
	int idx;
	struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
	struct wfx_tx_policy wanted;
	struct wfx_tx_policy *entry;

	wfx_tx_policy_build(wvif, &wanted, rates);

	spin_lock_bh(&cache->lock);
	if (list_empty(&cache->free)) {
		WARN(1, "unable to get a valid Tx policy");
		spin_unlock_bh(&cache->lock);
		return HIF_TX_RETRY_POLICY_INVALID;
	}
	idx = wfx_tx_policy_find(cache, &wanted);
	if (idx >= 0) {
		*renew = false;
	} else {
		/* If the policy is not found, create a new one using the oldest entry in the
		 * "free" list
		 */
		*renew = true;
		entry = list_entry(cache->free.prev, struct wfx_tx_policy, link);
		memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
		entry->uploaded = false;
		entry->usage_count = 0;
		idx = entry - cache->cache;
	}
	wfx_tx_policy_use(cache, &cache->cache[idx]);
	if (list_empty(&cache->free))
		ieee80211_stop_queues(wvif->wdev->hw);
	spin_unlock_bh(&cache->lock);
	return idx;
}

static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
{
	int usage, locked;
	struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;

	if (idx == HIF_TX_RETRY_POLICY_INVALID)
		return;
	spin_lock_bh(&cache->lock);
	locked = list_empty(&cache->free);
	usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
	if (locked && !usage)
		ieee80211_wake_queues(wvif->wdev->hw);
	spin_unlock_bh(&cache->lock);
}

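/* Upload every policy that is in use but not yet known to the firmware. The cache lock cannot be
 * held across the HIF call, so the scan is simply restarted after each upload.
 */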
static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
	struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache;
	u8 tmp_rates[12];
	int i, is_used;

	do {
		spin_lock_bh(&wvif->tx_policy_cache.lock);
		for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
			is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates));
			if (!policies[i].uploaded && is_used)
				break;
		}
		if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
			policies[i].uploaded = true;
			memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
			spin_unlock_bh(&wvif->tx_policy_cache.lock);
			wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
		} else {
			spin_unlock_bh(&wvif->tx_policy_cache.lock);
		}
	} while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
	return 0;
}

void wfx_tx_policy_upload_work(struct work_struct *work)
{
	struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work);

	wfx_tx_policy_upload(wvif);
	wfx_tx_unlock(wvif->wdev);
}

void wfx_tx_policy_init(struct wfx_vif *wvif)
{
	struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
	int i;

	memset(cache, 0, sizeof(*cache));

	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->used);
	INIT_LIST_HEAD(&cache->free);

	for (i = 0; i < ARRAY_SIZE(cache->cache); ++i)
		list_add(&cache->cache[i].link, &cache->free);
}

/* Tx implementation */

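/* Detect BlockAck session management frames (action category "BACK": ADDBA request/response and
 * DELBA). With TX_AMPDU_SETUP_IN_HW, BlockAck session setup is handled by the device, so mac80211
 * should never hand such frames to the driver (see wfx_tx()).
 */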
static bool wfx_is_action_back(struct ieee80211_hdr *hdr)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;

	if (!ieee80211_is_action(mgmt->frame_control))
		return false;
	if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
		return false;
	return true;
}

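/* Return the firmware link id for the destination of the frame: the station's allocated link_id
 * when it is known, 0 for non-AP interfaces and for multicast traffic, and
 * HIF_LINK_ID_NOT_ASSOCIATED for unicast frames sent to a station not (yet) associated on an AP
 * interface.
 */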
static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
			     struct ieee80211_hdr *hdr)
{
	struct wfx_sta_priv *sta_priv = sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
	struct ieee80211_vif *vif = wvif_to_vif(wvif);
	const u8 *da = ieee80211_get_DA(hdr);

	if (sta_priv && sta_priv->link_id)
		return sta_priv->link_id;
	if (vif->type != NL80211_IFTYPE_AP)
		return 0;
	if (is_multicast_ether_addr(da))
		return 0;
	return HIF_LINK_ID_NOT_ASSOCIATED;
}

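/* Rework the rate table provided by minstrel into something the device can use: entries that are
 * not in descending order are merged into the previous one (their retry counts are added, capped
 * at 15), Short GI is kept only on the first rate, and the table is terminated with the lowest
 * rate (MCS0 or 1Mbps) so the frame always gets a last chance at the most robust rate.
 */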
static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
{
	bool has_rate0 = false;
	int i, j;

	for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
		if (rates[j].idx == -1)
			break;
		/* The device uses the rates in descending order, whatever minstrel requests. We
		 * have to make a trade-off here: the most important thing is to respect the
		 * primary rate requested by minstrel, so we drop the entries with a rate higher
		 * than the previous one.
		 */
		if (rates[j].idx >= rates[i - 1].idx) {
			rates[i - 1].count += rates[j].count;
			rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
		} else {
			memcpy(rates + i, rates + j, sizeof(rates[i]));
			if (rates[i].idx == 0)
				has_rate0 = true;
			/* The device applies Short GI only to the first rate */
			rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
			i++;
		}
	}
	/* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
	if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
		rates[i].idx = 0;
		rates[i].count = 8; /* == hw->max_rate_tries */
		rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
		i++;
	}
	for (; i < IEEE80211_TX_MAX_RATES; i++) {
		memset(rates + i, 0, sizeof(rates[i]));
		rates[i].idx = -1;
	}
}

static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
{
	bool tx_policy_renew = false;
	u8 ret;

	ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew);
	if (ret == HIF_TX_RETRY_POLICY_INVALID)
		dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");

	if (tx_policy_renew) {
		wfx_tx_lock(wvif->wdev);
		if (!schedule_work(&wvif->tx_policy_upload_work))
			wfx_tx_unlock(wvif->wdev);
	}
	return ret;
}

static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info)
{
	if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS))
		return HIF_FRAME_FORMAT_NON_HT;
	else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD))
		return HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
	else
		return HIF_FRAME_FORMAT_GF_HT_11N;
}

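/* Trailer room to reserve for the ICV (and the TKIP MIC) appended when the frame is encrypted by
 * the hardware; this space is reclaimed with skb_trim() in wfx_tx_confirm_cb().
 */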
static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
{
	int mic_space;

	if (!hw_key)
		return 0;
	if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return 0;
	mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
	return hw_key->icv_len + mic_space;
}

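/* Build the HIF TX request in the headroom of the skb. The resulting layout is:
 *   [wfx_hif_msg][wfx_hif_req_tx][0-3 padding bytes][802.11 frame][room for the ICV/MIC]
 * with req->fc_offset telling the device where the 802.11 header actually starts. packet_id only
 * has to be unique, so the IEEE sequence number (bits 16-27) and the queue id (bits 28-31) are
 * also encoded into it to help debugging.
 */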
static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct wfx_hif_msg *hif_msg;
	struct wfx_hif_req_tx *req;
	struct wfx_tx_priv *tx_priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int queue_id = skb_get_queue_mapping(skb);
	size_t offset = (size_t)skb->data & 3;
	int wmsg_len = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + offset;

	WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
	wfx_tx_fixup_rates(tx_info->driver_rates);

	/* From now on, tx_info->control is unusable */
	memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
	/* Fill tx_priv */
	tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
	tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);

	/* Fill hif_msg */
	WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
	WARN(offset & 1, "attempt to transmit an unaligned frame");
	skb_put(skb, tx_priv->icv_size);
	skb_push(skb, wmsg_len);
	memset(skb->data, 0, wmsg_len);
	hif_msg = (struct wfx_hif_msg *)skb->data;
	hif_msg->len = cpu_to_le16(skb->len);
	hif_msg->id = HIF_REQ_ID_TX;
	hif_msg->interface = wvif->id;
	if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) {
		dev_warn(wvif->wdev->dev,
			 "requested frame size (%d) is larger than maximum supported (%d)\n",
			 skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf));
		skb_pull(skb, wmsg_len);
		return -EIO;
	}

	/* Fill tx request */
	req = (struct wfx_hif_req_tx *)hif_msg->body;
	/* packet_id just needs to be unique on the device. 32 bits are more than necessary for
	 * that task, so we take advantage of it to add some extra data for debug.
	 */
	req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
	req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
	req->packet_id |= queue_id << 28;

	req->fc_offset = offset;
	/* Queue indexes are inverted between the firmware and Linux */
	req->queue_id = 3 - queue_id;
	req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
	req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info);
	req->frame_format = wfx_tx_get_frame_format(tx_info);
	if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
		req->short_gi = 1;
	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		req->after_dtim = 1;

	/* Auxiliary operations */
	wfx_tx_queues_put(wvif, skb);
	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		schedule_work(&wvif->update_tim_work);
	wfx_bh_request_tx(wvif->wdev);
	return 0;
}

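/* mac80211 .tx handler: pick the target vif (injected frames may not carry one, in which case the
 * first available vif is used), filter out BlockAck action frames and hand the frame over to
 * wfx_tx_inner(). Frames that cannot be sent are immediately reported back to mac80211.
 */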
void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb)
{
	struct wfx_dev *wdev = hw->priv;
	struct wfx_vif *wvif;
	struct ieee80211_sta *sta = control ? control->sta : NULL;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data);

	BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room,
			 "struct tx_priv is too large");
	WARN(skb->next || skb->prev, "skb is already member of a list");
	/* control.vif can be NULL for injected frames */
	if (tx_info->control.vif)
		wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
	else
		wvif = wvif_iterate(wdev, NULL);
	if (WARN_ON(!wvif))
		goto drop;
	/* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session
	 * management frame. The check below exists just in case.
	 */
	if (wfx_is_action_back(hdr)) {
		dev_info(wdev->dev, "drop BA action\n");
		goto drop;
	}
	if (wfx_tx_inner(wvif, sta, skb))
		goto drop;

	return;

drop:
	ieee80211_tx_status_irqsafe(wdev->hw, skb);
}

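/* Common destructor for transmitted frames: release the retry policy, strip the HIF header and
 * padding that wfx_tx_inner() prepended, and return the skb to mac80211.
 */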
static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
{
	struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data;
	struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body;
	unsigned int offset = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) +
			      req->fc_offset;

	if (!wvif) {
		pr_warn("vif associated with the skb does not exist anymore\n");
		return;
	}
	wfx_tx_policy_put(wvif, req->retry_policy_index);
	skb_pull(skb, offset);
	ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
}

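/* Spread the transmission attempts reported by the device across the rate table so that mac80211
 * gets per-rate retry counts; the remaining entries are cleared (idx = -1).
 */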
static void wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info,
			      const struct wfx_hif_cnf_tx *arg)
{
	struct ieee80211_tx_rate *rate;
	int tx_count;
	int i;

	tx_count = arg->ack_failures;
	if (!arg->status || arg->ack_failures)
		tx_count += 1; /* Also report success */
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		rate = &tx_info->status.rates[i];
		if (rate->idx < 0)
			break;
		if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
		    arg->ack_failures)
			dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n",
				rate->count, tx_count);
		if (tx_count <= rate->count && tx_count &&
		    arg->txed_rate != wfx_get_hw_rate(wdev, rate))
			dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
				arg->txed_rate, wfx_get_hw_rate(wdev, rate));
		if (tx_count > rate->count) {
			tx_count -= rate->count;
		} else if (!tx_count) {
			rate->count = 0;
			rate->idx = -1;
		} else {
			rate->count = tx_count;
			tx_count = 0;
		}
	}
	if (tx_count)
		dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
}

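/* TX confirmation from the device: retrieve the pending skb from its packet_id, rebuild the
 * status and rate information expected by mac80211 and release the frame through wfx_skb_dtor().
 */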
void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg)
{
	const struct wfx_tx_priv *tx_priv;
	struct ieee80211_tx_info *tx_info;
	struct wfx_vif *wvif;
	struct sk_buff *skb;

	skb = wfx_pending_get(wdev, arg->packet_id);
	if (!skb) {
		dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
			 arg->packet_id);
		return;
	}
	tx_info = IEEE80211_SKB_CB(skb);
	tx_priv = wfx_skb_tx_priv(skb);
	wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface);
	WARN_ON(!wvif);
	if (!wvif)
		return;

	/* Note that wfx_pending_get_pkt_us_delay() gets its data from tx_info */
	_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
	wfx_tx_fill_rates(wdev, tx_info, arg);
	skb_trim(skb, skb->len - tx_priv->icv_size);

	/* From now on, tx_info->status can be used, but tx_priv must not be touched anymore */
	/* FIXME: use ieee80211_tx_info_clear_status() */
	memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
	memset(tx_info->pad, 0, sizeof(tx_info->pad));

	if (!arg->status) {
		tx_info->status.tx_time = le32_to_cpu(arg->media_delay) -
					  le32_to_cpu(arg->tx_queue_delay);
		if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
			tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
	} else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
		WARN(!arg->requeue, "incoherent status and result_flags");
		if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
			wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */
			schedule_work(&wvif->update_tim_work);
		}
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	}
	wfx_skb_dtor(wvif, skb);
}

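/* Drop and/or drain the AC queues selected by the 'queues' bitmap for one vif. When the chip is
 * frozen there is no point in waiting for the queues to empty, so only the drop step is done.
 */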
static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues, struct sk_buff_head *dropped)
{
	struct wfx_queue *queue;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		if (!(BIT(i) & queues))
			continue;
		queue = &wvif->tx_queue[i];
		if (dropped)
			wfx_tx_queue_drop(wvif, queue, dropped);
	}
	if (wvif->wdev->chip_frozen)
		return;
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		if (!(BIT(i) & queues))
			continue;
		queue = &wvif->tx_queue[i];
		if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue),
				       msecs_to_jiffies(1000)) <= 0)
			dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?");
	}
}

void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop)
{
	struct wfx_dev *wdev = hw->priv;
	struct sk_buff_head dropped;
	struct wfx_vif *wvif;
	struct wfx_hif_msg *hif;
	struct sk_buff *skb;

	skb_queue_head_init(&dropped);
	if (vif) {
		wvif = (struct wfx_vif *)vif->drv_priv;
		wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
	} else {
		wvif = NULL;
		while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
			wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
	}
	wfx_tx_flush(wdev);
	if (wdev->chip_frozen)
		wfx_pending_drop(wdev, &dropped);
	while ((skb = skb_dequeue(&dropped)) != NULL) {
		hif = (struct wfx_hif_msg *)skb->data;
		wvif = wdev_to_wvif(wdev, hif->interface);
		ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb));
		wfx_skb_dtor(wvif, skb);
	}
}