// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

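/* Map a txq to its hardware queue id; txqs without a station (e.g. the
 * per-vif non-QoS queue) fall back to best effort.
 */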
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

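/* Record the next expected sequence number of an active aggregation
 * session, so that a BAR with an up-to-date SSN can be sent when the
 * queue is stopped or flushed.
 */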
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

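/* Tx status reclaim runs with the status list lock held; completed frames
 * are collected on a caller-provided list and only handed to mac80211
 * after the lock has been dropped.
 */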
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);

	while ((skb = __skb_dequeue(list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

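/* A status entry is finished only once both the DMA completion and the tx
 * status report have been seen; until then, just accumulate the flags.
 */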
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

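/* Allocate a packet id for status tracking and add the frame to the status
 * list. Frames that need no status report get MT_PACKET_ID_NO_ACK or
 * MT_PACKET_ID_NO_SKB instead and are never added to the list.
 */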
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

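/* Find the status list entry matching a (wcid, packet id) pair. Entries
 * that have timed out, or all entries of the wcid when called with a
 * negative pktid (flush), are completed as failed along the way.
 */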
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

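/* Frames without an AQL airtime estimate are counted per wcid; decrement
 * the counter on completion and clamp it at zero if it underflows.
 */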
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_wcid *wcid;
	int pending;

	if (info->tx_time_est)
		return;

	if (wcid_idx >= ARRAY_SIZE(dev->wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->wcid[wcid_idx]);
	if (wcid) {
		pending = atomic_dec_return(&wcid->non_aql_packets);
		if (pending < 0)
			atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
	}

	rcu_read_unlock();
}

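/* Final tx completion. A frame with skb->prev == NULL was never linked
 * into the status list and can be freed right away; everything else is
 * completed through the status list machinery.
 */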
void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	mt76_tx_check_non_aql(dev, wcid_idx, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		return;
	}
#endif

	if (!skb->prev) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

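/* Queue a single frame on a hardware queue. For frames without an AQL
 * estimate, charge the per-station counter and tell the caller to stop
 * pulling more frames once MT_MAX_NON_AQL_PKT are in flight.
 */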
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	if (idx < 0 || !sta || !non_aql)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;
	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

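/* Direct tx entry point for frames handed to the driver outside of the
 * txq scheduler, e.g. management frames.
 */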
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

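/* Release buffered frames in response to a PS-Poll or uAPSD trigger. The
 * last released frame carries EOSP and requests a tx status report; if
 * nothing was buffered, end the service period immediately.
 */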
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

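/* Pull a burst of frames from one txq onto the hardware queue, stopping
 * when the queue fills up, the non-AQL budget runs out or a reset starts.
 * Returns the number of queued frames or a negative error code.
 */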
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	dev->queue_ops->kick(dev, q);

	return n_frames;
}

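/* Round-robin over the txqs that mac80211 scheduled for this queue id.
 * A pending BAR for an aggregation session is sent first, with the queue
 * lock dropped around the call into mac80211.
 */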
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		spin_lock_bh(&q->lock);

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&q->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&q->lock);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq);

		spin_unlock_bh(&q->lock);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

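/* Only the four data ACs (qid 0-3) go through the txq scheduler;
 * MT_TXQ_PSD and the other special queues are filled directly.
 */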
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

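/* Map a mac80211 access category to the WMM hardware queue index. */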
u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

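/* Pad the frame to the size expected by the hardware. skb_pad() only
 * guarantees zeroed tailroom, so the padding is added to the frame
 * length with __skb_put() afterwards.
 */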
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so update the length of both the head and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

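/* Common completion step for one hardware queue entry: run the driver's
 * tx_complete_skb hook, then advance the queue tail.
 */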
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

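/* Block or unblock tx scheduling on the first data queue of both phys;
 * used by the token pool code below, with dev->token_lock held.
 */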
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
	struct mt76_queue *q, *q2 = NULL;

	q = phy->q_tx[0];
	if (blocked == q->blocked)
		return;

	q->blocked = blocked;
	if (phy2) {
		q2 = phy2->q_tx[0];
		q2->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

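/* Allocate a tx token for a txwi buffer. When the pool runs low, tx
 * scheduling is blocked until enough tokens have been released.
 */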
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
			  GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

	if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

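/* Release a tx token. Sets *wake when tx was blocked and enough tokens
 * are available again; unblocking is left to the caller, outside of the
 * token lock.
 */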
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi)
		dev->token_count--;

	if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);