// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

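/* Tx status reports are deferred: mt76_tx_status_lock() initializes a
 * temporary list and takes status_lock, completed frames are collected on
 * that list, and mt76_tx_status_unlock() hands them to mac80211 after
 * dropping the lock. A typical caller (see mt76_tx_status_check() below):
 *
 *	struct sk_buff_head list;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	... look up frames, e.g. via mt76_tx_status_skb_get() ...
 *	mt76_tx_status_unlock(dev, &list);
 */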
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct ieee80211_rate_status rs = {};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
				rs.rate_idx = wcid->rate;
				status.rates = &rs;
				status.n_rates = 1;
			} else {
				status.n_rates = 0;
			}
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

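/* Register a frame for tx status tracking. Returns a packet ID
 * (>= MT_PACKET_ID_FIRST) that drivers typically embed in the tx descriptor
 * so a later status event can be matched back to the skb, or
 * MT_PACKET_ID_NO_ACK / MT_PACKET_ID_NO_SKB when no per-skb report is
 * needed.
 */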
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

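/* Fetch the skb registered under pktid and remove it from the wcid idr.
 * While walking the idr, entries whose DMA completion is older than
 * MT_TX_STATUS_SKB_TIMEOUT are expired with MT_TX_CB_TXS_FAILED; a negative
 * pktid (flush) expires every entry. Must be called with status_lock held.
 */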
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						   MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

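/* Walk all wcid entries with pending status frames and expire stale ones.
 * With flush set, every pending frame is completed immediately.
 */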
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

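/* Common DMA completion handler. Testmode frames are freed directly, frames
 * without a packet ID are reported to mac80211 right away, and frames that
 * wait for a tx status event are only marked MT_TX_CB_DMA_DONE here; the
 * final report happens when the status event arrives or the entry times out.
 */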
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

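/* Transmit a single frame handed over by mac80211. Management frames are
 * redirected to the PSD queue on hardware with a dedicated management queue,
 * tx rates are filled in unless the wcid already carries rate info, and the
 * frame is queued and kicked under the hardware queue lock.
 */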
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(skb)) {
		qid = MT_TXQ_PSD;
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

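/* mac80211 release_buffered_frames callback: pull up to nframes frames from
 * the requested TIDs, set the more-data bit on all but the last frame, mark
 * the last one with EOSP and queue them on the PSD queue. If nothing was
 * buffered, signal the end of the service period directly.
 */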
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

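/* Pull frames from the mac80211 software queues of one AC and push them to
 * the corresponding hardware queue until the queues run dry, the hardware
 * queue fills up, or a reset is in progress.
 */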
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

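/* Append 'pad' bytes of padding to a frame, handling fragmented (A-MSDU)
 * skbs where the head tracks the total length but the padding has to go
 * into the last fragment.
 */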
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so update the length of both it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

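/* Allocate a DMA token for a queued tx frame. When the number of outstanding
 * tokens comes within MT76_TOKEN_FREE_THR of the pool size, tx queues are
 * blocked until enough tokens are returned via mt76_token_release().
 */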
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *t, dma_addr_t phys)
{
	int token;

	spin_lock_bh(&dev->rx_token_lock);
	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
			  GFP_ATOMIC);
	if (token >= 0) {
		t->ptr = ptr;
		t->dma_addr = phys;
	}
	spin_unlock_bh(&dev->rx_token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);

struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *t;

	spin_lock_bh(&dev->rx_token_lock);
	t = idr_remove(&dev->rx_token, token);
	spin_unlock_bh(&dev->rx_token_lock);

	return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);