// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

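/*
 * Allocate a hardware TX descriptor (txwi) buffer together with its cache
 * entry in one device-managed allocation. The mt76_txwi_cache struct is
 * placed right behind the descriptor, which is mapped for DMA here and only
 * unmapped again in mt76_tx_free().
 */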
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr))) {
		devm_kfree(dev->dev, txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

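/*
 * Unmap all cached txwi buffers. The buffers themselves are device-managed
 * and are released automatically once the device is detached.
 */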
void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);

	while ((skb = __skb_dequeue(list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

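/*
 * A frame on the status list is handed back to mac80211 only after both
 * completion halves (MT_TX_CB_DMA_DONE and MT_TX_CB_TXS_DONE) have been
 * reported.
 */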
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

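/*
 * Assign a per-wcid packet ID to a frame that needs a hardware TX status
 * report and queue it on the status list, so the status event can later be
 * matched back to the skb. Frames that do not need a report get one of the
 * reserved MT_PACKET_ID_NO_ACK / MT_PACKET_ID_NO_SKB values instead.
 */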
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

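/*
 * Look up a pending frame on the status list by wcid and packet ID. Entries
 * that have been waiting for longer than MT_TX_STATUS_SKB_TIMEOUT are
 * completed as failed; passing a negative pktid flushes all matching entries.
 */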
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

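/*
 * DMA completion handler for a transmitted frame. Frames that were never
 * queued on the status list are freed right away; everything else is only
 * released once the matching TX status report has arrived as well.
 */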
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

#ifdef CONFIG_NL80211_TESTMODE
	if (skb == dev->test.tx_skb) {
		dev->test.tx_done++;
		if (dev->test.tx_queued == dev->test.tx_done)
			wake_up(&dev->tx_wait);
	}
#endif

	if (!skb->prev) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

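/*
 * Transmit entry point for frames coming from mac80211: pick the target
 * hardware queue, fill in rate information, track the A-MPDU sequence
 * number and hand the frame to the queue backend.
 */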
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(dev)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = dev->q_tx[qid].q;

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

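	/* Stop the corresponding mac80211 queue while the hardware queue is
	 * running low on free descriptors.
	 */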
	if (q->queued > q->ndesc - 8 && !q->stopped) {
		ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
	}

	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

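/*
 * Queue a single powersave response frame on the PSD queue. The last frame
 * of the service period is marked with EOSP and a TX status request.
 */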
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

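/*
 * Release frames buffered for a station in powersave for the requested TIDs.
 * Frames are pulled from the per-TID queues and sent on the PSD hardware
 * queue; if nothing is pending, the service period is ended immediately.
 */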
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

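/*
 * Pull a burst of frames from a single txq and queue them for transmission,
 * reusing the rate of the first frame for the rest of the burst. The burst
 * ends at a rate-probing frame, at an A-MPDU/non-A-MPDU transition, or once
 * the per-burst frame limit is reached.
 */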
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
		    struct mt76_txq *mtxq)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq, false);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(phy, mtxq, false);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->entry[idx].qid = sq - dev->q_tx;
		hwq->entry[idx].schedule = true;
		sq->swq_queued++;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}

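/*
 * Service the mac80211 txq scheduler for one hardware queue: send a pending
 * BAR when aggregation state requires it, then transmit a burst from each
 * eligible txq until the software queue limit is reached.
 */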
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	spin_lock_bh(&hwq->lock);
	while (1) {
		if (sq->swq_queued >= 4)
			break;

		if (test_bit(MT76_RESET, &phy->state)) {
			ret = -EBUSY;
			break;
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
		}

		ret += mt76_txq_send_burst(phy, sq, mtxq);
		ieee80211_return_txq(phy->hw, txq,
				     !skb_queue_empty(&mtxq->retry_q));
	}
	spin_unlock_bh(&hwq->lock);

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	int len;

	if (qid >= 4)
		return;

	if (sq->swq_queued >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;

	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->test.tx_pending)
		mt76_testmode_tx_pending(dev);
#endif
}

void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		hwq = mtxq->swq->q;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct ieee80211_hw *hw;
	struct mt76_txq *mtxq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

	skb_queue_head_init(&mtxq->retry_q);

	mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

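/* Map a mac80211 access category to the corresponding hardware queue index */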
u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb)
{
	struct sk_buff *iter, *last = skb;
	u32 pad;

	/* Add zero pad of 4 - 7 bytes */
	pad = round_up(skb->len, 4) + 4 - skb->len;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so the length of both the first and the last packet needs
	 * to be updated.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);