1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3 
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
7 #include "mt7921.h"
8 #include "../dma.h"
9 #include "mac.h"
10 #include "mcu.h"
11 
12 #define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)
13 
14 #define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
15 #define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
16 						 IEEE80211_RADIOTAP_HE_##f)
17 
18 static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
19 					    u16 idx, bool unicast)
20 {
21 	struct mt7921_sta *sta;
22 	struct mt76_wcid *wcid;
23 
24 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
25 		return NULL;
26 
27 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
28 	if (unicast || !wcid)
29 		return wcid;
30 
31 	if (!wcid->sta)
32 		return NULL;
33 
34 	sta = container_of(wcid, struct mt7921_sta, wcid);
35 	if (!sta->vif)
36 		return NULL;
37 
38 	return &sta->vif->sta.wcid;
39 }
40 
/* mt76 .sta_ps hook: intentionally a no-op for mt7921 — the driver has
 * nothing to do on a station's power-save state transition here.
 */
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
44 
45 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
46 {
47 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
48 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
49 
50 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
51 			 0, 5000);
52 }
53 
54 static u32 mt7921_mac_wtbl_lmac_addr(struct mt7921_dev *dev, u16 wcid)
55 {
56 	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
57 		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
58 
59 	return MT_WTBL_LMAC_OFFS(wcid, 0);
60 }
61 
/* Poll per-AC tx/rx airtime counters from the WTBL for every station
 * queued on dev->sta_poll_list and report the deltas to mac80211.
 */
static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
	/* representative TID used when registering per-AC airtime */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7921_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* take ownership of the pending entries so new stations can keep
	 * being queued while we walk the snapshot
	 */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		/* pop one station at a time under the lock */
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7921_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* airtime counters start at dword 20 of the WTBL entry:
		 * one tx/rx dword pair per access category
		 */
		idx = msta->wcid.idx;
		addr = mt7921_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			/* counters are monotonically increasing; report the
			 * delta since the previous poll
			 */
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* request a hw counter reset before values wrap */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7921_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			/* hw queues are remapped; index the deltas through
			 * the lmac queue mapping
			 */
			u8 q = mt7921_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}
143 
144 static void
145 mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
146 				 struct ieee80211_radiotap_he *he,
147 				 __le32 *rxv)
148 {
149 	u32 ru_h, ru_l;
150 	u8 ru, offs = 0;
151 
152 	ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
153 	ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
154 	ru = (u8)(ru_l | ru_h << 4);
155 
156 	status->bw = RATE_INFO_BW_HE_RU;
157 
158 	switch (ru) {
159 	case 0 ... 36:
160 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
161 		offs = ru;
162 		break;
163 	case 37 ... 52:
164 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
165 		offs = ru - 37;
166 		break;
167 	case 53 ... 60:
168 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
169 		offs = ru - 53;
170 		break;
171 	case 61 ... 64:
172 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
173 		offs = ru - 61;
174 		break;
175 	case 65 ... 66:
176 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
177 		offs = ru - 65;
178 		break;
179 	case 67:
180 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
181 		break;
182 	case 68:
183 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
184 		break;
185 	}
186 
187 	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
188 	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
189 		     le16_encode_bits(offs,
190 				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
191 }
192 
/* Push an ieee80211_radiotap_he header onto @skb and populate it from
 * the C-RXV words for the given HE PHY mode (@phy).
 */
static void
mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
			      struct mt76_rx_status *status,
			      __le32 *rxv, u32 phy)
{
	/* TODO: struct ieee80211_radiotap_he_mu */
	/* fields valid for every HE format; format-specific "known" bits
	 * are OR-ed in below
	 */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	/* hw encodes LTF size minus one */
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	switch (phy) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);

		/* MU frames additionally carry an RU allocation */
		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}
271 
272 static void
273 mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
274 			    struct mt76_rx_status *status, u8 chfreq)
275 {
276 	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
277 	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
278 	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
279 		status->freq = mphy->chandef.chan->center_freq;
280 		status->band = mphy->chandef.chan->band;
281 		return;
282 	}
283 
284 	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
285 	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
286 }
287 
288 int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
289 {
290 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
291 	struct mt76_phy *mphy = &dev->mt76.phy;
292 	struct mt7921_phy *phy = &dev->phy;
293 	struct ieee80211_supported_band *sband;
294 	struct ieee80211_hdr *hdr;
295 	__le32 *rxd = (__le32 *)skb->data;
296 	__le32 *rxv = NULL;
297 	u32 mode = 0;
298 	u32 rxd1 = le32_to_cpu(rxd[1]);
299 	u32 rxd2 = le32_to_cpu(rxd[2]);
300 	u32 rxd3 = le32_to_cpu(rxd[3]);
301 	bool unicast, insert_ccmp_hdr = false;
302 	u8 remove_pad;
303 	int i, idx;
304 	u8 chfreq;
305 
306 	memset(status, 0, sizeof(*status));
307 
308 	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
309 		return -EINVAL;
310 
311 	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
312 		return -EINVAL;
313 
314 	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
315 	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
316 	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
317 	status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
318 
319 	if (status->wcid) {
320 		struct mt7921_sta *msta;
321 
322 		msta = container_of(status->wcid, struct mt7921_sta, wcid);
323 		spin_lock_bh(&dev->sta_poll_lock);
324 		if (list_empty(&msta->poll_list))
325 			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
326 		spin_unlock_bh(&dev->sta_poll_lock);
327 	}
328 
329 	mt7921_get_status_freq_info(dev, mphy, status, chfreq);
330 
331 	if (status->band == NL80211_BAND_5GHZ)
332 		sband = &mphy->sband_5g.sband;
333 	else
334 		sband = &mphy->sband_2g.sband;
335 
336 	if (!sband->channels)
337 		return -EINVAL;
338 
339 	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
340 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
341 
342 	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
343 		status->flag |= RX_FLAG_MMIC_ERROR;
344 
345 	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
346 	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
347 		status->flag |= RX_FLAG_DECRYPTED;
348 		status->flag |= RX_FLAG_IV_STRIPPED;
349 		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
350 	}
351 
352 	if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
353 		status->flag |= RX_FLAG_AMPDU_DETAILS;
354 
355 		/* all subframes of an A-MPDU have the same timestamp */
356 		if (phy->rx_ampdu_ts != rxd[14]) {
357 			if (!++phy->ampdu_ref)
358 				phy->ampdu_ref++;
359 		}
360 		phy->rx_ampdu_ts = rxd[14];
361 
362 		status->ampdu_ref = phy->ampdu_ref;
363 	}
364 
365 	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
366 
367 	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
368 		return -EINVAL;
369 
370 	rxd += 6;
371 	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
372 		rxd += 4;
373 		if ((u8 *)rxd - skb->data >= skb->len)
374 			return -EINVAL;
375 	}
376 
377 	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
378 		u8 *data = (u8 *)rxd;
379 
380 		if (status->flag & RX_FLAG_DECRYPTED) {
381 			status->iv[0] = data[5];
382 			status->iv[1] = data[4];
383 			status->iv[2] = data[3];
384 			status->iv[3] = data[2];
385 			status->iv[4] = data[1];
386 			status->iv[5] = data[0];
387 
388 			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
389 		}
390 		rxd += 4;
391 		if ((u8 *)rxd - skb->data >= skb->len)
392 			return -EINVAL;
393 	}
394 
395 	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
396 		rxd += 2;
397 		if ((u8 *)rxd - skb->data >= skb->len)
398 			return -EINVAL;
399 	}
400 
401 	/* RXD Group 3 - P-RXV */
402 	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
403 		u32 v0, v1, v2;
404 
405 		rxv = rxd;
406 		rxd += 2;
407 		if ((u8 *)rxd - skb->data >= skb->len)
408 			return -EINVAL;
409 
410 		v0 = le32_to_cpu(rxv[0]);
411 		v1 = le32_to_cpu(rxv[1]);
412 		v2 = le32_to_cpu(rxv[2]);
413 
414 		if (v0 & MT_PRXV_HT_AD_CODE)
415 			status->enc_flags |= RX_ENC_FLAG_LDPC;
416 
417 		status->chains = mphy->antenna_mask;
418 		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
419 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
420 		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
421 		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
422 		status->signal = status->chain_signal[0];
423 
424 		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
425 			if (!(status->chains & BIT(i)))
426 				continue;
427 
428 			status->signal = max(status->signal,
429 					     status->chain_signal[i]);
430 		}
431 
432 		/* RXD Group 5 - C-RXV */
433 		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
434 			u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
435 			u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
436 			bool cck = false;
437 
438 			rxd += 18;
439 			if ((u8 *)rxd - skb->data >= skb->len)
440 				return -EINVAL;
441 
442 			idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
443 			mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
444 
445 			switch (mode) {
446 			case MT_PHY_TYPE_CCK:
447 				cck = true;
448 				fallthrough;
449 			case MT_PHY_TYPE_OFDM:
450 				i = mt76_get_rate(&dev->mt76, sband, i, cck);
451 				break;
452 			case MT_PHY_TYPE_HT_GF:
453 			case MT_PHY_TYPE_HT:
454 				status->encoding = RX_ENC_HT;
455 				if (i > 31)
456 					return -EINVAL;
457 				break;
458 			case MT_PHY_TYPE_VHT:
459 				status->nss =
460 					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
461 				status->encoding = RX_ENC_VHT;
462 				if (i > 9)
463 					return -EINVAL;
464 				break;
465 			case MT_PHY_TYPE_HE_MU:
466 				status->flag |= RX_FLAG_RADIOTAP_HE_MU;
467 				fallthrough;
468 			case MT_PHY_TYPE_HE_SU:
469 			case MT_PHY_TYPE_HE_EXT_SU:
470 			case MT_PHY_TYPE_HE_TB:
471 				status->nss =
472 					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
473 				status->encoding = RX_ENC_HE;
474 				status->flag |= RX_FLAG_RADIOTAP_HE;
475 				i &= GENMASK(3, 0);
476 
477 				if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
478 					status->he_gi = gi;
479 
480 				status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
481 				break;
482 			default:
483 				return -EINVAL;
484 			}
485 			status->rate_idx = i;
486 
487 			switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
488 			case IEEE80211_STA_RX_BW_20:
489 				break;
490 			case IEEE80211_STA_RX_BW_40:
491 				if (mode & MT_PHY_TYPE_HE_EXT_SU &&
492 				    (idx & MT_PRXV_TX_ER_SU_106T)) {
493 					status->bw = RATE_INFO_BW_HE_RU;
494 					status->he_ru =
495 						NL80211_RATE_INFO_HE_RU_ALLOC_106;
496 				} else {
497 					status->bw = RATE_INFO_BW_40;
498 				}
499 				break;
500 			case IEEE80211_STA_RX_BW_80:
501 				status->bw = RATE_INFO_BW_80;
502 				break;
503 			case IEEE80211_STA_RX_BW_160:
504 				status->bw = RATE_INFO_BW_160;
505 				break;
506 			default:
507 				return -EINVAL;
508 			}
509 
510 			status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
511 			if (mode < MT_PHY_TYPE_HE_SU && gi)
512 				status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
513 		}
514 	}
515 
516 	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
517 
518 	if (insert_ccmp_hdr) {
519 		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
520 
521 		mt76_insert_ccmp_hdr(skb, key_id);
522 	}
523 
524 	if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
525 		mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
526 
527 	hdr = mt76_skb_get_hdr(skb);
528 	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
529 		return 0;
530 
531 	status->aggr = unicast &&
532 		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
533 	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
534 	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
535 
536 	return 0;
537 }
538 
/* Fill the header-format specific TXWI fields for a frame transmitted
 * with hw 802.11 encapsulation (frame handed down as 802.3).
 */
static void
mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		/* wcid is the first member of the per-sta drv_priv */
		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	/* values >= ETH_P_802_3_MIN indicate an EtherType (Ethernet II
	 * framing) rather than an 802.3 length field
	 */
	if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	/* hw builds the 802.11 header: report data/QoS-data frame type */
	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}
575 
/* Fill the header-format specific TXWI fields for a frame carrying a
 * full 802.11 header built by mac80211.
 */
static void
mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* for ADDBA requests, tag the descriptor and use the TID being
	 * negotiated (bits 2..5 of the capability field)
	 */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		/* BAR frames carry the TID in the BAR control field */
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	/* header length is encoded in units of 2 bytes */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);
	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);

	/* BIP-protected multicast robust mgmt frames: let hw add the MMIE
	 * instead of the regular protection
	 */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD2_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	/* non-data and group frames go out at a fixed rate (set by caller) */
	if (!ieee80211_is_data(fc) || multicast)
		val |= MT_TXD2_FIX_RATE;

	txwi[2] |= cpu_to_le32(val);

	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	/* injected frames keep their caller-supplied sequence number */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}
651 
/* Build the common part of the TX descriptor (TXWI, 8 dwords) for @skb,
 * then delegate the header-format specific fields to the 802.3 or 802.11
 * helper depending on whether hw encapsulation is in use.
 */
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	/* pick packet format and hw queue: beacons go through the fw
	 * queue, PSD/mgmt through the ALTX queue, everything else to the
	 * per-vif WMM queue set
	 */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
			mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;
	txwi[5] = 0;
	txwi[6] = 0;
	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;

	if (is_8023)
		mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7921_mac_write_txwi_80211(dev, txwi, skb, key);

	/* the helper above may have requested a fixed rate; program the
	 * band-specific default basic rate
	 */
	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
		u16 rate;

		/* hardware won't add HTC for mgmt/ctrl frame */
		txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7921_5G_RATE_DEFAULT;
		else
			rate = MT7921_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
729 
/* Build the hw TX path descriptor (TXP): copy the scatter/gather list
 * from @tx_info into addr/len pairs after the TXD, packing two fragments
 * into each mt7921_txp_ptr (buf0/len0 and buf1/len1).  After this, the
 * DMA layer only sees a single buffer (TXD + TXP).
 */
static void
mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct mt7921_hw_txp *txp = txp_ptr;
	struct mt7921_txp_ptr *ptr = &txp->ptr[0];
	int i, nbuf = tx_info->nbuf - 1;

	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->nbuf = 1;

	/* the token id lets the tx-free event find this frame again */
	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);

	for (i = 0; i < nbuf; i++) {
		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
		u32 addr = tx_info->buf[i + 1].addr;

		/* mark the final fragment so unmap knows where to stop */
		if (i == nbuf - 1)
			len |= MT_TXD_LEN_LAST;

		if (i & 1) {
			ptr->buf1 = cpu_to_le32(addr);
			ptr->len1 = cpu_to_le16(len);
			ptr++;
		} else {
			ptr->buf0 = cpu_to_le32(addr);
			ptr->len0 = cpu_to_le16(len);
		}
	}
}
760 
761 static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked)
762 {
763 	struct mt76_phy *mphy = &dev->mphy;
764 	struct mt76_queue *q;
765 
766 	q = mphy->q_tx[0];
767 	if (blocked == q->blocked)
768 		return;
769 
770 	q->blocked = blocked;
771 	if (!blocked)
772 		mt76_worker_schedule(&dev->mt76.tx_worker);
773 }
774 
/* mt76 .tx_prepare_skb hook: allocate a msdu token, build TXWI + TXP and
 * hand the frame to the DMA layer.  Returns 0 or a negative errno (token
 * pool exhausted / bad frame).
 */
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
	struct mt76_txwi_cache *t;
	struct mt7921_txp_common *txp;
	int id;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	cb->wcid = wcid->idx;

	/* the txwi cache entry lives right behind the hw descriptor */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	/* allocate the msdu token the tx-free event will report back;
	 * block tx before the pool is fully exhausted
	 */
	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC);
	if (id >= 0)
		dev->token_count++;

	if (dev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR)
		mt7921_set_tx_blocked(dev, true);
	spin_unlock_bh(&dev->token_lock);

	if (id < 0)
		return id;

	mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      false);

	txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt7921_txp_common));
	mt7921_write_hw_txp(dev, tx_info, txp, id);

	/* completion is reported via the token, not the DMA queue entry */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
823 
824 static void
825 mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
826 {
827 	struct mt7921_sta *msta;
828 	u16 fc, tid;
829 	u32 val;
830 
831 	if (!sta || !sta->ht_cap.ht_supported)
832 		return;
833 
834 	tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
835 	if (tid >= 6) /* skip VO queue */
836 		return;
837 
838 	val = le32_to_cpu(txwi[2]);
839 	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
840 	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
841 	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
842 		return;
843 
844 	msta = (struct mt7921_sta *)sta->drv_priv;
845 	if (!test_and_set_bit(tid, &msta->ampdu_state))
846 		ieee80211_start_tx_ba_session(sta, tid, 0);
847 }
848 
/* Report tx completion of @skb to mac80211.  @stat != 0 means the frame
 * failed; @free_list lets mac80211 batch the skb frees.
 */
static void
mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
			  struct ieee80211_sta *sta, u8 stat,
			  struct list_head *free_list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		.skb = skb,
		.free_list = free_list,
	};
	struct ieee80211_hw *hw;

	if (sta) {
		struct mt7921_sta *msta;

		/* report the last known tx rate for this station */
		msta = (struct mt7921_sta *)sta->drv_priv;
		status.rate = &msta->stats.tx_rate;
	}

	hw = mt76_tx_status_get_hw(mdev, skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		info->flags |= IEEE80211_TX_STAT_AMPDU;

	/* wipe any stale status info on failure before setting ACK */
	if (stat)
		ieee80211_tx_info_clear_status(info);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.tx_time = 0;
	ieee80211_tx_status_ext(hw, &status);
}
884 
/* DMA-unmap every fragment recorded in the frame's TXP.  Fragments are
 * packed two per mt7921_txp_ptr (buf0/len0, buf1/len1) and the final one
 * carries the MT_TXD_LEN_LAST marker.
 */
void mt7921_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7921_txp_common *txp;
	int i;

	txp = mt7921_txwi_to_txp(dev, t);

	for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
		struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
		bool last;
		u16 len;

		/* first fragment of the pair */
		len = le16_to_cpu(ptr->len0);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		/* second fragment of the pair */
		len = le16_to_cpu(ptr->len1);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}
915 
/* Handle a TX-free event from the hw: for every reported msdu token,
 * release the token, unmap the frame's buffers and report completion
 * status to mac80211.  Consumes @skb.
 */
void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *tmp;
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
	 * to the time ack is received or dropped by hw (air + hw queue time).
	 * Should avoid accessing WTBL to get Tx airtime, and use it instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt7921_phy *phy;
			struct mt76_wcid *wcid;
			u16 idx;

			/* a pair entry occupies an extra slot in the list,
			 * so extend the iteration by one
			 */
			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			/* following msdu entries belong to this station */
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* queue the station for stats/airtime updates */
			msta = container_of(wcid, struct mt7921_sta, wcid);
			phy = msta->vif->phy;
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->stats_list))
				list_add_tail(&msta->stats_list, &phy->stats_list);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		/* release the token; unblock tx once enough are free */
		spin_lock_bh(&dev->token_lock);
		txwi = idr_remove(&dev->token, msdu);
		if (txwi)
			dev->token_count--;
		if (dev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR &&
		    dev->mphy.q_tx[0]->blocked)
			wake = true;
		spin_unlock_bh(&dev->token_lock);

		if (!txwi)
			continue;

		mt7921_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
			void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);

			/* don't trigger BA sessions off EAPOL frames */
			if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
				mt7921_tx_check_aggr(sta, txwi_ptr);

			/* balance the non-AQL pending-frame counter,
			 * clamping it at zero
			 */
			if (sta && !info->tx_time_est) {
				struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
				int pending;

				pending = atomic_dec_return(&wcid->non_aql_packets);
				if (pending < 0)
					atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
			}

			mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}

	if (wake) {
		spin_lock_bh(&dev->token_lock);
		mt7921_set_tx_blocked(dev, false);
		spin_unlock_bh(&dev->token_lock);
	}

	napi_consume_skb(skb, 1);

	/* free the completed skbs mac80211 collected for us */
	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	/* skip register polling while the device is in low-power state */
	if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
		return;

	mt7921_mac_sta_poll(dev);

	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);

	mt76_worker_schedule(&dev->mt76.tx_worker);
}
1028 
/* mt76 .tx_complete_skb hook: complete a frame coming back through the
 * DMA queue rather than through a tx-free event (e.g. on queue cleanup).
 */
void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7921_dev *dev;

	/* no txwi: nothing to look up, just drop the skb */
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	dev = container_of(mdev, struct mt7921_dev, mt76);

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		/* the real skb was stashed in the token idr at prepare
		 * time; recover it via the msdu id stored in the TXP
		 */
		struct mt76_txwi_cache *t;
		struct mt7921_txp_common *txp;
		u16 token;

		txp = mt7921_txwi_to_txp(mdev, e->txwi);

		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, token);
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);

		mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
					  NULL);
	}
}
1065 
1066 void mt7921_mac_reset_counters(struct mt7921_phy *phy)
1067 {
1068 	struct mt7921_dev *dev = phy->dev;
1069 	int i;
1070 
1071 	for (i = 0; i < 4; i++) {
1072 		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
1073 		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
1074 	}
1075 
1076 	dev->mt76.phy.survey_time = ktime_get_boottime();
1077 	memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);
1078 
1079 	/* reset airtime counters */
1080 	mt76_rr(dev, MT_MIB_SDR9(0));
1081 	mt76_rr(dev, MT_MIB_SDR36(0));
1082 	mt76_rr(dev, MT_MIB_SDR37(0));
1083 
1084 	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
1085 	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
1086 }
1087 
1088 void mt7921_mac_set_timing(struct mt7921_phy *phy)
1089 {
1090 	s16 coverage_class = phy->coverage_class;
1091 	struct mt7921_dev *dev = phy->dev;
1092 	u32 val, reg_offset;
1093 	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1094 		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1095 	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1096 		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
1097 	int sifs, offset;
1098 	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
1099 
1100 	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
1101 		return;
1102 
1103 	if (is_5ghz)
1104 		sifs = 16;
1105 	else
1106 		sifs = 10;
1107 
1108 	mt76_set(dev, MT_ARB_SCR(0),
1109 		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1110 	udelay(1);
1111 
1112 	offset = 3 * coverage_class;
1113 	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1114 		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1115 
1116 	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
1117 	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
1118 	mt76_wr(dev, MT_TMAC_ICR0(0),
1119 		FIELD_PREP(MT_IFS_EIFS, 360) |
1120 		FIELD_PREP(MT_IFS_RIFS, 2) |
1121 		FIELD_PREP(MT_IFS_SIFS, sifs) |
1122 		FIELD_PREP(MT_IFS_SLOT, phy->slottime));
1123 
1124 	if (phy->slottime < 20 || is_5ghz)
1125 		val = MT7921_CFEND_RATE_DEFAULT;
1126 	else
1127 		val = MT7921_CFEND_RATE_11B;
1128 
1129 	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
1130 	mt76_clear(dev, MT_ARB_SCR(0),
1131 		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1132 }
1133 
/* Noise-floor readout is not implemented for mt7921; returning 0 makes the
 * caller's averaging logic treat it as "no new sample".
 */
static u8
mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
{
	return 0;
}
1139 
1140 static void
1141 mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
1142 {
1143 	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
1144 	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
1145 	struct mt76_channel_state *state;
1146 	u64 busy_time, tx_time, rx_time, obss_time;
1147 	int nf;
1148 
1149 	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
1150 				   MT_MIB_SDR9_BUSY_MASK);
1151 	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
1152 				 MT_MIB_SDR36_TXTIME_MASK);
1153 	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
1154 				 MT_MIB_SDR37_RXTIME_MASK);
1155 	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
1156 				   MT_MIB_OBSSTIME_MASK);
1157 
1158 	nf = mt7921_phy_get_nf(phy, idx);
1159 	if (!phy->noise)
1160 		phy->noise = nf << 4;
1161 	else if (nf)
1162 		phy->noise += nf - (phy->noise >> 4);
1163 
1164 	state = mphy->chan_state;
1165 	state->cc_busy += busy_time;
1166 	state->cc_tx += tx_time;
1167 	state->cc_rx += rx_time + obss_time;
1168 	state->cc_bss_rx += rx_time;
1169 	state->noise = -(phy->noise >> 4);
1170 }
1171 
/* mt76 survey callback: update channel statistics if the device can be
 * woken from runtime PM.
 */
void mt7921_update_channel(struct mt76_dev *mdev)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);

	/* bail out if the device could not be woken up */
	if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
		return;

	mt7921_phy_update_channel(&mdev->phy, 0);
	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);

	/* allow the device to go back to sleep */
	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
1185 
1186 static bool
1187 mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state)
1188 {
1189 	bool ret;
1190 
1191 	ret = wait_event_timeout(dev->reset_wait,
1192 				 (READ_ONCE(dev->reset_state) & state),
1193 				 MT7921_RESET_TIMEOUT);
1194 
1195 	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1196 	return ret;
1197 }
1198 
1199 static void
1200 mt7921_dma_reset(struct mt7921_phy *phy)
1201 {
1202 	struct mt7921_dev *dev = phy->dev;
1203 	int i;
1204 
1205 	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
1206 		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
1207 
1208 	usleep_range(1000, 2000);
1209 
1210 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
1211 	for (i = 0; i < __MT_TXQ_MAX; i++)
1212 		mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
1213 
1214 	mt76_for_each_q_rx(&dev->mt76, i) {
1215 		mt76_queue_rx_reset(dev, i);
1216 	}
1217 
1218 	/* re-init prefetch settings after reset */
1219 	mt7921_dma_prefetch(dev);
1220 
1221 	mt76_set(dev, MT_WFDMA0_GLO_CFG,
1222 		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
1223 }
1224 
/* Release every outstanding tx DMA token: unmap the buffers, give the skbs
 * back to mac80211 and destroy the token IDR. Used on teardown and during
 * full-chip reset.
 */
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->token_lock);
	idr_for_each_entry(&dev->token, txwi, id) {
		/* undo the DMA mapping before freeing the frame */
		mt7921_txp_skb_unmap(&dev->mt76, txwi);
		if (txwi->skb) {
			struct ieee80211_hw *hw;

			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
			ieee80211_free_txskb(hw, txwi->skb);
		}
		mt76_put_txwi(&dev->mt76, txwi);
		dev->token_count--;
	}
	spin_unlock_bh(&dev->token_lock);
	/* caller re-creates the IDR (idr_init) if tokens are needed again */
	idr_destroy(&dev->token);
}
1245 
/* system error recovery */
void mt7921_mac_reset_work(struct work_struct *work)
{
	struct mt7921_dev *dev;

	dev = container_of(work, struct mt7921_dev, reset_work);

	/* only act once the MCU has asked us to stop DMA; anything else
	 * means this work ran spuriously
	 */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));

	/* block normal MAC/MCU processing and kick any waiters stuck on
	 * MCU responses before the reset handshake starts
	 */
	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.napi[2]);
	napi_disable(&dev->mt76.tx_napi);

	mt7921_mutex_acquire(dev);

	/* ack the STOP_DMA request towards the MCU */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	/* drop all outstanding DMA tokens (frees their skbs) and start
	 * over with an empty IDR
	 */
	mt7921_tx_token_put(dev);
	idr_init(&dev->token);

	if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7921_dma_reset(&dev->phy);

		/* tell the MCU DMA is back up, then wait for its recovery */
		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);

	/* re-enable and kick all tx/rx NAPI contexts */
	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	napi_enable(&dev->mt76.napi[2]);
	napi_schedule(&dev->mt76.napi[2]);

	ieee80211_wake_queues(mt76_hw(dev));

	/* complete the handshake and wait until the MCU is operational */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt7921_mutex_release(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7921_WATCHDOG_TIME);
}
1312 
/* Refresh the software MIB snapshot from hardware: FCS errors, worst-case
 * ack-fail/BA-miss/RTS-retry counts across the four bands, and the
 * per-length tx aggregation histogram.
 */
static void
mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	/* aggr0 indexes the low-order counters, aggr1 the high-order ones */
	int i, aggr0 = 0, aggr1;

	memset(mib, 0, sizeof(*mib));

	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
					  MT_MIB_SDR3_FCS_ERR_MASK);

	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
		u32 val, val2;

		val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));

		/* keep the worst value seen across all four iterations */
		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
		if (val2 > mib->ack_fail_cnt)
			mib->ack_fail_cnt = val2;

		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		if (val2 > mib->ba_miss_cnt)
			mib->ba_miss_cnt = val2;

		/* rts_cnt is taken from the iteration with the most retries */
		val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
		if (val2 > mib->rts_retries_cnt) {
			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt = val2;
		}

		/* each 32-bit register packs two 16-bit histogram buckets */
		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;
		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
	}
}
1354 
/* Drain the per-phy station stats list, querying the WTBL for each entry.
 * The poll lock is dropped around the (potentially sleeping) MCU query,
 * which is why entries are detached one at a time instead of iterated.
 */
static void
mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mt7921_sta *msta;
	LIST_HEAD(list);

	/* detach the whole pending list so new entries can keep queueing */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&phy->stats_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7921_sta, stats_list);
		list_del_init(&msta->stats_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* query wtbl info to report tx rate for further devices */
		mt7921_get_wtbl_info(dev, msta->wcid.idx);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1378 
1379 void mt7921_mac_work(struct work_struct *work)
1380 {
1381 	struct mt7921_phy *phy;
1382 	struct mt76_phy *mphy;
1383 
1384 	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
1385 					       mac_work.work);
1386 	phy = mphy->priv;
1387 
1388 	if (test_bit(MT76_STATE_PM, &mphy->state))
1389 		goto out;
1390 
1391 	mt7921_mutex_acquire(phy->dev);
1392 
1393 	mt76_update_survey(mphy->dev);
1394 	if (++mphy->mac_work_count == 5) {
1395 		mphy->mac_work_count = 0;
1396 
1397 		mt7921_mac_update_mib_stats(phy);
1398 	}
1399 	if (++phy->sta_work_count == 10) {
1400 		phy->sta_work_count = 0;
1401 		mt7921_mac_sta_stats_work(phy);
1402 	};
1403 
1404 	mt7921_mutex_release(phy->dev);
1405 
1406 out:
1407 	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
1408 				     MT7921_WATCHDOG_TIME);
1409 }
1410 
1411 void mt7921_pm_wake_work(struct work_struct *work)
1412 {
1413 	struct mt7921_dev *dev;
1414 	struct mt76_phy *mphy;
1415 
1416 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1417 						pm.wake_work);
1418 	mphy = dev->phy.mt76;
1419 
1420 	if (!mt7921_mcu_drv_pmctrl(dev))
1421 		mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
1422 	else
1423 		dev_err(mphy->dev->dev, "failed to wake device\n");
1424 
1425 	ieee80211_wake_queues(mphy->hw);
1426 	complete_all(&dev->pm.wake_cmpl);
1427 }
1428 
1429 void mt7921_pm_power_save_work(struct work_struct *work)
1430 {
1431 	struct mt7921_dev *dev;
1432 	unsigned long delta;
1433 
1434 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1435 						pm.ps_work.work);
1436 
1437 	delta = dev->pm.idle_timeout;
1438 	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
1439 		delta = dev->pm.last_activity + delta - jiffies;
1440 		goto out;
1441 	}
1442 
1443 	if (!mt7921_mcu_fw_pmctrl(dev))
1444 		return;
1445 out:
1446 	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
1447 }
1448 
1449 int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
1450 				 struct ieee80211_vif *vif,
1451 				 bool enable)
1452 {
1453 	struct mt7921_dev *dev = phy->dev;
1454 	bool ext_phy = phy != &dev->phy;
1455 	int err;
1456 
1457 	if (!dev->pm.enable)
1458 		return -EOPNOTSUPP;
1459 
1460 	err = mt7921_mcu_set_bss_pm(dev, vif, enable);
1461 	if (err)
1462 		return err;
1463 
1464 	if (enable) {
1465 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
1466 		mt76_set(dev, MT_WF_RFCR(ext_phy),
1467 			 MT_WF_RFCR_DROP_OTHER_BEACON);
1468 	} else {
1469 		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
1470 		mt76_clear(dev, MT_WF_RFCR(ext_phy),
1471 			   MT_WF_RFCR_DROP_OTHER_BEACON);
1472 	}
1473 
1474 	return 0;
1475 }
1476 
1477 void mt7921_coredump_work(struct work_struct *work)
1478 {
1479 	struct mt7921_dev *dev;
1480 	char *dump, *data;
1481 
1482 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1483 						coredump.work.work);
1484 
1485 	if (time_is_after_jiffies(dev->coredump.last_activity +
1486 				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
1487 		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
1488 				   MT76_CONNAC_COREDUMP_TIMEOUT);
1489 		return;
1490 	}
1491 
1492 	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
1493 	data = dump;
1494 
1495 	while (true) {
1496 		struct sk_buff *skb;
1497 
1498 		spin_lock_bh(&dev->mt76.lock);
1499 		skb = __skb_dequeue(&dev->coredump.msg_list);
1500 		spin_unlock_bh(&dev->mt76.lock);
1501 
1502 		if (!skb)
1503 			break;
1504 
1505 		skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
1506 		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
1507 			break;
1508 
1509 		memcpy(data, skb->data, skb->len);
1510 		data += skb->len;
1511 
1512 		dev_kfree_skb(skb);
1513 	}
1514 	dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
1515 		      GFP_KERNEL);
1516 }
1517