// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

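/* Map a mac80211 txq to a hardware queue id; txqs without an associated
 * station fall back to the best-effort queue.
 */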
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

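/* Record the sequence number following this frame for an active aggregation
 * session; it is used as the SSN of the BlockAckReq sent from
 * mt76_txq_schedule_list().
 */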
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

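/* mt76_tx_status_lock()/mt76_tx_status_unlock() bracket access to
 * dev->status_list: completed frames are collected on a local list under the
 * lock and only reported to mac80211 after it has been dropped.
 */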
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid)
			status.sta = wcid_to_sta(wcid);

		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status_ext(hw, &status);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

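/* Complete a tracked frame once both MT_TX_CB_DMA_DONE and MT_TX_CB_TXS_DONE
 * have been flagged, moving it from dev->status_list to the caller's list.
 */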
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable; if it failed, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

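/* Assign a packet id to a frame that needs a TX status report and add it to
 * dev->status_list; returns MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB when
 * the frame does not need to be tracked.
 */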
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	pid = mt76_get_next_pkt_id(wcid);
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

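/* Look up a tracked frame by wcid and packet id. Entries older than
 * MT_TX_STATUS_SKB_TIMEOUT are completed as failed while walking the list;
 * a negative pktid flushes all entries matching the wcid.
 */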
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

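/* Frames without an airtime estimate are not covered by AQL; drop the
 * per-station counter of such packets when one of them completes.
 */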
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

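/* Common TX completion path: update non-AQL accounting, handle testmode
 * frames, and either report the status to mac80211 directly or, if the frame
 * is on dev->status_list, defer until the TX status arrives.
 */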
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (!skb->prev) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		ieee80211_tx_status_ext(hw, &status);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

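/* Queue a frame via the driver's queue ops and account for non-AQL packets;
 * *stop is set once the station has MT_MAX_NON_AQL_PKT of them in flight.
 */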
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

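/* Transmit a single frame: select the hardware queue, fill in the rate table
 * if the wcid has none programmed, and hand the frame to the queue ops.
 */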
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

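/* mac80211 .release_buffered_frames callback: dequeue up to nframes frames
 * for the given TIDs and send them as PS responses, flagging the last one
 * with EOSP. Signal EOSP directly if nothing was buffered.
 */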
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

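/* Pull frames from one txq and queue them to hardware until the hardware
 * queue fills up, the station reaches the non-AQL packet limit or the txq
 * runs empty. Returns the number of queued frames or a negative error code.
 */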
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	dev->queue_ops->kick(dev, q);

	return n_frames;
}

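/* Serve the txqs handed out by mac80211's scheduler for this queue, sending
 * a BlockAckReq first where a stopped aggregation session requires it, then
 * bursting frames from each txq.
 */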
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		spin_lock_bh(&q->lock);

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&q->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&q->lock);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq);

		spin_unlock_bh(&q->lock);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

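/* Schedule one access category; qids beyond the four data ACs are filled
 * directly and never go through the txq scheduler.
 */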
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

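/* Grow an skb (or the last fragment of an A-MSDU) by pad bytes of zero
 * padding, keeping the total length bookkeeping of the head skb consistent.
 */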
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first skb of an A-MSDU burst tracks the total burst length;
	 * update its length as well as that of the last fragment.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

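/* Block or unblock TX on the primary queue of both phys. Used by the token
 * helpers below to throttle TX when DMA tokens run low; per the double
 * underscore convention, callers are expected to hold dev->token_lock.
 */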
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
	struct mt76_queue *q, *q2 = NULL;

	q = phy->q_tx[0];
	if (blocked == q->blocked)
		return;

	q->blocked = blocked;
	if (phy2) {
		q2 = phy2->q_tx[0];
		q2->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

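/* Allocate a DMA token for a pending frame and block TX once the number of
 * outstanding tokens approaches the driver's token_size limit.
 */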
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
			  GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

	if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

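/* Release a DMA token and report via *wake whether TX had been blocked and
 * can be restarted now that tokens are available again.
 */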
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi)
		dev->token_count--;

	if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);