1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3 
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
7 #include "mt7921.h"
8 #include "../dma.h"
9 #include "mac.h"
10 #include "mcu.h"
11 
12 #define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
13 #define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
14 						 IEEE80211_RADIOTAP_HE_##f)
15 
16 static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
17 					    u16 idx, bool unicast)
18 {
19 	struct mt7921_sta *sta;
20 	struct mt76_wcid *wcid;
21 
22 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
23 		return NULL;
24 
25 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
26 	if (unicast || !wcid)
27 		return wcid;
28 
29 	if (!wcid->sta)
30 		return NULL;
31 
32 	sta = container_of(wcid, struct mt7921_sta, wcid);
33 	if (!sta->vif)
34 		return NULL;
35 
36 	return &sta->vif->sta.wcid;
37 }
38 
/* mac80211 per-station powersave transition callback.
 * Intentionally empty: the driver does no bookkeeping here when a station
 * enters or leaves powersave (presumably handled elsewhere/in firmware —
 * confirm against the mt76 core callers).
 */
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
42 
43 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
44 {
45 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
46 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
47 
48 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
49 			 0, 5000);
50 }
51 
/* Harvest per-AC TX/RX airtime from the WTBL for every station queued on
 * dev->sta_poll_list and report the deltas to mac80211 for airtime
 * fairness accounting.
 */
static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
	/* Representative TID used when reporting airtime for each AC */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7921_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* Steal the whole pending list in one shot to minimize lock hold */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7921_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* WTBL admission-control counters live at word offset 20 of
		 * the entry; one 8-byte (tx, rx) pair per AC follows.
		 */
		idx = msta->wcid.idx;
		addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			/* Unsigned subtraction yields the delta even if the
			 * hardware counter wrapped once since the last poll.
			 */
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* Counter high-water mark reached: schedule a
			 * hardware-side clear below to avoid overflow.
			 */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7921_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		/* Translate LMAC queue-ordered deltas back to mac80211 ACs */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt7921_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}
133 
134 static void
135 mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
136 				 struct ieee80211_radiotap_he *he,
137 				 __le32 *rxv)
138 {
139 	u32 ru_h, ru_l;
140 	u8 ru, offs = 0;
141 
142 	ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
143 	ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
144 	ru = (u8)(ru_l | ru_h << 4);
145 
146 	status->bw = RATE_INFO_BW_HE_RU;
147 
148 	switch (ru) {
149 	case 0 ... 36:
150 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
151 		offs = ru;
152 		break;
153 	case 37 ... 52:
154 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
155 		offs = ru - 37;
156 		break;
157 	case 53 ... 60:
158 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
159 		offs = ru - 53;
160 		break;
161 	case 61 ... 64:
162 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
163 		offs = ru - 61;
164 		break;
165 	case 65 ... 66:
166 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
167 		offs = ru - 65;
168 		break;
169 	case 67:
170 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
171 		break;
172 	case 68:
173 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
174 		break;
175 	}
176 
177 	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
178 	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
179 		     le16_encode_bits(offs,
180 				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
181 }
182 
/* Build an ieee80211_radiotap_he header in front of the frame from the
 * C-RXV words and the PHY mode, so userspace capture sees the HE PPDU
 * parameters. The header is pushed onto the skb head.
 */
static void
mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
			      struct mt76_rx_status *status,
			      __le32 *rxv, u32 phy)
{
	/* TODO: struct ieee80211_radiotap_he_mu */
	/* Fields the hardware always reports, independent of HE format */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	/* LTF size field is encoded off-by-one in the RXV */
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	/* Format-specific known-bits and fields */
	switch (phy) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);

		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		/* Trigger-based PPDU: four spatial-reuse fields */
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}
261 
262 static void
263 mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
264 			    struct mt76_rx_status *status, u8 chfreq)
265 {
266 	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
267 	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
268 	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
269 		status->freq = mphy->chandef.chan->center_freq;
270 		status->band = mphy->chandef.chan->band;
271 		return;
272 	}
273 
274 	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
275 	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
276 }
277 
278 static void
279 mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
280 {
281 	struct sk_buff *skb = priv;
282 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
283 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
284 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
285 
286 	if (status->signal > 0)
287 		return;
288 
289 	if (!ether_addr_equal(vif->addr, hdr->addr1))
290 		return;
291 
292 	ewma_rssi_add(&mvif->rssi, -status->signal);
293 }
294 
295 static void
296 mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
297 {
298 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
299 
300 	if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
301 	    !ieee80211_is_auth(hdr->frame_control))
302 		return;
303 
304 	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
305 		IEEE80211_IFACE_ITER_RESUME_ALL,
306 		mt7921_mac_rssi_iter, skb);
307 }
308 
/* Parse the hardware RX descriptor (RXD) and optional RX vector groups
 * prepended to a received frame, populate skb->cb (struct mt76_rx_status)
 * with rate/signal/crypto information, and strip the descriptor so the skb
 * starts at the 802.11 header.
 *
 * Returns 0 on success, -EINVAL when the descriptor is malformed, the PHY
 * is not running, or the frame cannot be classified.
 */
int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7921_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 mode = 0;
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad;
	int i, idx;
	u8 chfreq;

	memset(status, 0, sizeof(*status));

	/* Only band index 0 is handled here */
	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
		return -EINVAL;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);

	/* Queue the station for the next airtime poll */
	if (status->wcid) {
		struct mt7921_sta *msta;

		msta = container_of(status->wcid, struct mt7921_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	mt7921_get_status_freq_info(dev, mphy, status, chfreq);

	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Hardware decrypted the frame and stripped IV/MIC, unless a
	 * cipher-length or cipher mismatch error is flagged.
	 */
	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	/* Header padding (in 2-byte units) to remove after the descriptor */
	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* Skip the fixed 6-word RXD, then each optional group that the
	 * descriptor flags advertise, bounds-checking against skb->len.
	 */
	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			/* Group 1 carries the stripped IV in reverse byte
			 * order; keep it so fragments can be reassembled.
			 */
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				/* bump the reference, skipping 0 which means
				 * "no A-MPDU reference"
				 */
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u8 stbc, gi;
		u32 v0, v1;
		bool cck;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		/* Per-chain RCPI converted to dBm; overall signal is the
		 * strongest enabled chain.
		 */
		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
		status->signal = status->chain_signal[0];

		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		stbc = FIELD_GET(MT_PRXV_STBC, v0);
		gi = FIELD_GET(MT_PRXV_SGI, v0);
		cck = false;

		/* idx keeps the raw rate field (flag bits included); i is
		 * progressively narrowed to the mac80211 rate index.
		 */
		idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
		mode = FIELD_GET(MT_PRXV_TX_MODE, v0);

		switch (mode) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss =
				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
			status->encoding = RX_ENC_VHT;
			if (i > 9)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_HE_MU:
			status->flag |= RX_FLAG_RADIOTAP_HE_MU;
			fallthrough;
		case MT_PHY_TYPE_HE_SU:
		case MT_PHY_TYPE_HE_EXT_SU:
		case MT_PHY_TYPE_HE_TB:
			status->nss =
				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
			status->encoding = RX_ENC_HE;
			status->flag |= RX_FLAG_RADIOTAP_HE;
			/* HE MCS is the low 4 bits of the rate field */
			i &= GENMASK(3, 0);

			if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
				status->he_gi = gi;

			status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
			break;
		default:
			return -EINVAL;
		}

		status->rate_idx = i;

		switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
		case IEEE80211_STA_RX_BW_20:
			break;
		case IEEE80211_STA_RX_BW_40:
			/* NOTE(review): bitwise '&' with the enum value
			 * MT_PHY_TYPE_HE_EXT_SU also matches other modes that
			 * share its bits; presumably only ER SU frames carry
			 * MT_PRXV_TX_ER_SU_106T so this works in practice —
			 * confirm a '==' comparison was not intended.
			 */
			if (mode & MT_PHY_TYPE_HE_EXT_SU &&
			    (idx & MT_PRXV_TX_ER_SU_106T)) {
				status->bw = RATE_INFO_BW_HE_RU;
				status->he_ru =
					NL80211_RATE_INFO_HE_RU_ALLOC_106;
			} else {
				status->bw = RATE_INFO_BW_40;
			}
			break;
		case IEEE80211_STA_RX_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case IEEE80211_STA_RX_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
		if (mode < MT_PHY_TYPE_HE_SU && gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		/* Group 5 (C-RXV) follows the P-RXV when present */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}
	}

	/* Drop the descriptor and alignment padding from the skb */
	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	/* Re-insert a CCMP header for defragmentation in mac80211 */
	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	mt7921_mac_assoc_rssi(dev, skb);

	if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
		mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);

	hdr = mt76_skb_get_hdr(skb);
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}
565 
566 static void
567 mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
568 			   struct sk_buff *skb, struct mt76_wcid *wcid)
569 {
570 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
571 	u8 fc_type, fc_stype;
572 	bool wmm = false;
573 	u32 val;
574 
575 	if (wcid->sta) {
576 		struct ieee80211_sta *sta;
577 
578 		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
579 		wmm = sta->wme;
580 	}
581 
582 	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
583 	      FIELD_PREP(MT_TXD1_TID, tid);
584 
585 	if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
586 		val |= MT_TXD1_ETH_802_3;
587 
588 	txwi[1] |= cpu_to_le32(val);
589 
590 	fc_type = IEEE80211_FTYPE_DATA >> 2;
591 	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
592 
593 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
594 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
595 
596 	txwi[2] |= cpu_to_le32(val);
597 
598 	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
599 	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
600 	txwi[7] |= cpu_to_le32(val);
601 }
602 
/* Fill the 802.11-frame specific fields of the TX descriptor: TID (with
 * special handling for ADDBA requests and BAR frames), header format/length,
 * frame type/subtype, BIP/fixed-rate flags, beacon handling and injected
 * sequence numbers.
 */
static void
mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		/* ADDBA request: tell hw and use the TID from the capab field */
		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		/* BAR: TID comes from the BAR control field */
		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	/* Header length is encoded in 2-byte units */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);
	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);

	/* BIP-protected group management frames: flag BIP and drop the
	 * generic protection bit.
	 */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD2_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	/* Non-data and group frames go out at a fixed rate */
	if (!ieee80211_is_data(fc) || multicast)
		val |= MT_TXD2_FIX_RATE;

	txwi[2] |= cpu_to_le32(val);

	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	/* Injected frames keep their caller-supplied sequence number */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}
678 
/* Build the 8-word TX descriptor (TXWI) for @skb: common fields here, then
 * the 802.3 or 802.11 specific parts via the helpers above, and finally a
 * fixed-rate override for frames the hardware must not rate-control.
 *
 * @key: hardware key for protected frames, or NULL.
 * @beacon: true when writing the descriptor for a beacon template.
 */
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	/* Select packet format and hardware queue: beacons via firmware,
	 * PSD/management via the ALTX queue, everything else via the vif's
	 * WMM set mapped to an LMAC queue.
	 */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
			mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;
	txwi[5] = 0;
	txwi[6] = 0;
	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;

	if (is_8023)
		mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7921_mac_write_txwi_80211(dev, txwi, skb, key);

	/* Fixed-rate frames: force a basic rate and disable BA */
	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
		u16 rate;

		/* hardware won't add HTC for mgmt/ctrl frame */
		txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7921_5G_RATE_DEFAULT;
		else
			rate = MT7921_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
756 
757 static void
758 mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
759 		    void *txp_ptr, u32 id)
760 {
761 	struct mt7921_hw_txp *txp = txp_ptr;
762 	struct mt7921_txp_ptr *ptr = &txp->ptr[0];
763 	int i, nbuf = tx_info->nbuf - 1;
764 
765 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
766 	tx_info->nbuf = 1;
767 
768 	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
769 
770 	for (i = 0; i < nbuf; i++) {
771 		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
772 		u32 addr = tx_info->buf[i + 1].addr;
773 
774 		if (i == nbuf - 1)
775 			len |= MT_TXD_LEN_LAST;
776 
777 		if (i & 1) {
778 			ptr->buf1 = cpu_to_le32(addr);
779 			ptr->len1 = cpu_to_le16(len);
780 			ptr++;
781 		} else {
782 			ptr->buf0 = cpu_to_le32(addr);
783 			ptr->len0 = cpu_to_le16(len);
784 		}
785 	}
786 }
787 
/* mt76 .tx_prepare_skb hook: allocate an MSDU token, build the TXWI and the
 * hardware TXP for the frame, and hand ownership of the skb to the token
 * table (tx_info->skb is replaced by DMA_DUMMY_DATA).
 *
 * Returns 0 on success, a negative errno on bad input or token exhaustion.
 */
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
	struct mt76_txwi_cache *t;
	struct mt7921_txp_common *txp;
	int id;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	cb->wcid = wcid->idx;

	/* The txwi cache entry lives directly after the TXWI buffer */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      false);

	txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt7921_txp_common));
	mt7921_write_hw_txp(dev, tx_info, txp, id);

	/* The real skb is now tracked via the token; signal the DMA layer
	 * not to free it on completion.
	 */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
828 
829 static void
830 mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
831 {
832 	struct mt7921_sta *msta;
833 	u16 fc, tid;
834 	u32 val;
835 
836 	if (!sta || !sta->ht_cap.ht_supported)
837 		return;
838 
839 	tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
840 	if (tid >= 6) /* skip VO queue */
841 		return;
842 
843 	val = le32_to_cpu(txwi[2]);
844 	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
845 	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
846 	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
847 		return;
848 
849 	msta = (struct mt7921_sta *)sta->drv_priv;
850 	if (!test_and_set_bit(tid, &msta->ampdu_state))
851 		ieee80211_start_tx_ba_session(sta, tid, 0);
852 }
853 
/* Report TX completion of @skb to mac80211 via ieee80211_tx_status_ext().
 *
 * @stat: hardware completion status (non-zero presumably means failure —
 *        the status flags are cleared in that case).
 * @free_list: optional list the skb is appended to instead of being freed
 *             immediately (batch free in the caller).
 */
static void
mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
			  struct ieee80211_sta *sta, u8 stat,
			  struct list_head *free_list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		.skb = skb,
		.free_list = free_list,
	};
	struct ieee80211_hw *hw;

	if (sta) {
		struct mt7921_sta *msta;

		msta = (struct mt7921_sta *)sta->drv_priv;
		status.rate = &msta->stats.tx_rate;
	}

	hw = mt76_tx_status_get_hw(mdev, skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		info->flags |= IEEE80211_TX_STAT_AMPDU;

	if (stat)
		ieee80211_tx_info_clear_status(info);

	/* NOTE(review): STAT_ACK is set even after a non-zero @stat cleared
	 * the status above, so failed frames are still reported as acked —
	 * confirm this is the intended hardware-status semantics.
	 */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.tx_time = 0;
	ieee80211_tx_status_ext(hw, &status);
}
889 
890 void mt7921_txp_skb_unmap(struct mt76_dev *dev,
891 			  struct mt76_txwi_cache *t)
892 {
893 	struct mt7921_txp_common *txp;
894 	int i;
895 
896 	txp = mt7921_txwi_to_txp(dev, t);
897 
898 	for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
899 		struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
900 		bool last;
901 		u16 len;
902 
903 		len = le16_to_cpu(ptr->len0);
904 		last = len & MT_TXD_LEN_LAST;
905 		len &= MT_TXD_LEN_MASK;
906 		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
907 				 DMA_TO_DEVICE);
908 		if (last)
909 			break;
910 
911 		len = le16_to_cpu(ptr->len1);
912 		last = len & MT_TXD_LEN_LAST;
913 		len &= MT_TXD_LEN_MASK;
914 		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
915 				 DMA_TO_DEVICE);
916 		if (last)
917 			break;
918 	}
919 }
920 
/* Process a TX-free event from the hardware: release completed MSDU tokens,
 * unmap their DMA buffers, report completion status to mac80211, and queue
 * the involved stations for stats/airtime polling.
 */
void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *tmp;
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
	 * to the time ack is received or dropped by hw (air + hw queue time).
	 * Should avoid accessing WTBL to get Tx airtime, and use it instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt7921_phy *phy;
			struct mt76_wcid *wcid;
			u16 idx;

			/* presumably a wcid-pair entry consumes an extra
			 * slot in the event, so extend the loop bound —
			 * TODO confirm against the event layout.
			 */
			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			/* NOTE(review): rcu_dereference without a visible
			 * rcu_read_lock here — presumably the caller runs in
			 * an RCU-safe (napi/softirq) context; confirm.
			 */
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* Queue the station for stats and airtime polling */
			msta = container_of(wcid, struct mt7921_sta, wcid);
			phy = msta->vif->phy;
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->stats_list))
				list_add_tail(&msta->stats_list, &phy->stats_list);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7921_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
			void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);

			/* EAPOL frames are never aggregated */
			if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
				mt7921_tx_check_aggr(sta, txwi_ptr);

			/* Balance the non-AQL packet counter, clamping at 0 */
			if (sta && !info->tx_time_est) {
				struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
				int pending;

				pending = atomic_dec_return(&wcid->non_aql_packets);
				if (pending < 0)
					atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
			}

			mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}

	/* Unblock TX if token release freed up space */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	napi_consume_skb(skb, 1);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	mt7921_mac_sta_poll(dev);
	mt76_worker_schedule(&dev->mt76.tx_worker);
}
1016 
/* mt76 .tx_complete_skb hook: recover the real skb for DMA-completed
 * descriptors (normally freed via the TX-free event path; this handles the
 * DMA_DUMMY_DATA error path by looking up the token) and report completion.
 */
void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7921_dev *dev;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	dev = container_of(mdev, struct mt7921_dev, mt76);

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7921_txp_common *txp;
		u16 token;

		/* Recover the MSDU token stored in the hardware TXP and
		 * reclaim the skb that was parked on it.
		 */
		txp = mt7921_txwi_to_txp(mdev, e->txwi);
		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
		t = mt76_token_put(mdev, token);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);

		/* stat = 0: DMA completion itself carries no TX status */
		mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
					  NULL);
	}
}
1050 
/* Reset the PHY's statistics baseline: drain the hardware aggregation and
 * airtime counters (reads appear to clear them — the "reset" comments below
 * rely on read-to-clear semantics) and zero the software mirrors.
 */
void mt7921_mac_reset_counters(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	int i;

	/* Drain the per-queue TX aggregation counters */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
	}

	dev->mt76.phy.survey_time = ktime_get_boottime();
	/* Only the first half of aggr_stats belongs to this band */
	memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR37(0));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
}
1072 
/* Program the MAC timing parameters (SIFS/slot/EIFS, CCK/OFDM PLCP and
 * CCA timeouts, CF-End rate) for the current channel and coverage class.
 */
void mt7921_mac_set_timing(struct mt7921_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7921_dev *dev = phy->dev;
	u32 val, reg_offset;
	/* baseline PLCP/CCA timeout values for CCK and OFDM rates */
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* 802.11 SIFS: 16 on 5 GHz (OFDM), 10 on 2.4 GHz */
	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	/* quiesce the TX/RX arbiter while the timing registers change */
	mt76_set(dev, MT_ARB_SCR(0),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* each coverage-class step extends PLCP/CCA timeouts by 3 units
	 * (presumably microseconds — confirm)
	 */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(0),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* short slot or 5 GHz: use the default CF-End rate, else 11b */
	if (phy->slottime < 20 || is_5ghz)
		val = MT7921_CFEND_RATE_DEFAULT;
	else
		val = MT7921_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(0),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
1118 
/* Noise-floor readout is not implemented for mt7921; returning 0 leaves
 * the noise EWMA in mt7921_phy_update_channel() effectively unchanged.
 */
static u8
mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
{
	return 0;
}
1124 
/* Accumulate channel busy/TX/RX/OBSS airtime from the hardware MIB
 * counters into the mt76 channel state and update the smoothed noise
 * estimate for the phy.
 */
static void
mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
				   MT_MIB_OBSSTIME_MASK);

	/* EWMA of the noise floor in 4-bit fixed point; note that
	 * mt7921_phy_get_nf() currently returns 0, so phy->noise only
	 * changes on the very first (initialization) pass.
	 */
	nf = mt7921_phy_get_nf(phy, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	/* OBSS airtime counts toward generic RX but not BSS RX */
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}
1156 
1157 void mt7921_update_channel(struct mt76_dev *mdev)
1158 {
1159 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1160 
1161 	if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
1162 		return;
1163 
1164 	mt7921_phy_update_channel(&mdev->phy, 0);
1165 	/* reset obss airtime */
1166 	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
1167 
1168 	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
1169 }
1170 
1171 void mt7921_tx_token_put(struct mt7921_dev *dev)
1172 {
1173 	struct mt76_txwi_cache *txwi;
1174 	int id;
1175 
1176 	spin_lock_bh(&dev->mt76.token_lock);
1177 	idr_for_each_entry(&dev->mt76.token, txwi, id) {
1178 		mt7921_txp_skb_unmap(&dev->mt76, txwi);
1179 		if (txwi->skb) {
1180 			struct ieee80211_hw *hw;
1181 
1182 			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
1183 			ieee80211_free_txskb(hw, txwi->skb);
1184 		}
1185 		mt76_put_txwi(&dev->mt76, txwi);
1186 		dev->mt76.token_count--;
1187 	}
1188 	spin_unlock_bh(&dev->mt76.token_lock);
1189 	idr_destroy(&dev->mt76.token);
1190 }
1191 
1192 static void
1193 mt7921_vif_connect_iter(void *priv, u8 *mac,
1194 			struct ieee80211_vif *vif)
1195 {
1196 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
1197 	struct mt7921_dev *dev = mvif->phy->dev;
1198 
1199 	ieee80211_disconnect(vif, true);
1200 
1201 	mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
1202 	mt7921_mcu_set_tx(dev, vif);
1203 }
1204 
/* Full-chip recovery: quiesce the TX/RX datapath, reset the WPDMA
 * engine, reload the firmware and restart the phy. Returns 0 on success
 * or a negative error code. Called from mt7921_mac_reset_work() with
 * dev->mt76.mutex held.
 */
static int
mt7921_mac_reset(struct mt7921_dev *dev)
{
	int i, err;

	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);

	/* mask all host/PCIe interrupts for the duration of the reset */
	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

	/* fail pending MCU requests instead of letting them time out */
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	skb_queue_purge(&dev->mt76.mcu.res_q);

	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
	napi_disable(&dev->mt76.tx_napi);

	/* drop all outstanding TX tokens and start a fresh token space */
	mt7921_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	err = mt7921_wpdma_reset(dev, true);
	if (err)
		return err;

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	mt76_worker_enable(&dev->mt76.tx_worker);

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_PM, &dev->mphy.state);

	/* NOTE(review): HOST_INT_ENA is written 0 again here while the
	 * PCIe MAC interrupts are re-enabled; presumably the DMA interrupt
	 * mask is restored later by the firmware bring-up / start path —
	 * confirm.
	 */
	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);

	err = mt7921_run_firmware(dev);
	if (err)
		return err;

	err = mt7921_mcu_set_eeprom(dev);
	if (err)
		return err;

	mt7921_mac_init(dev);
	return __mt7921_start(&dev->phy);
}
1260 
/* system error recovery: stop all traffic, retry a full chip reset, and
 * force every active interface to reconnect afterwards.
 */
void mt7921_mac_reset_work(struct work_struct *work)
{
	struct ieee80211_hw *hw;
	struct mt7921_dev *dev;
	int i;

	dev = container_of(work, struct mt7921_dev, reset_work);
	hw = mt76_hw(dev);

	dev_err(dev->mt76.dev, "chip reset\n");
	ieee80211_stop_queues(hw);

	/* stop periodic/PM work that would race with the reset */
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&dev->pm.ps_work);
	cancel_work_sync(&dev->pm.wake_work);

	/* retry the full reset up to 10 times before giving up */
	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7921_mac_reset(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	/* an in-flight scan cannot survive the reset; report it aborted */
	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	ieee80211_wake_queues(hw);
	/* disconnect and re-register every active interface with the MCU */
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7921_vif_connect_iter, NULL);
}
1301 
1302 void mt7921_reset(struct mt76_dev *mdev)
1303 {
1304 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1305 
1306 	queue_work(dev->mt76.wq, &dev->reset_work);
1307 }
1308 
/* Fold the hardware MIB counters into the cumulative software stats.
 *
 * NOTE(review): with aggr0 starting at 0, aggr0 covers histogram slots
 * 0-7 while aggr1 (= aggr0 + 4) covers slots 4-11, so slots 4-7 receive
 * counts from both AGG_CNT and AGG_CNT2 — verify this bucket mapping is
 * intended.
 */
static void
mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0 = 0, aggr1;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
					   MT_MIB_SDR3_FCS_ERR_MASK);
	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
					    MT_MIB_ACK_FAIL_COUNT_MASK);
	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
					   MT_MIB_BA_FAIL_COUNT_MASK);
	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
				       MT_MIB_RTS_COUNT_MASK);
	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
					       MT_MIB_RTS_FAIL_COUNT_MASK);

	/* each AGG_CNT register packs two 16-bit aggregation-size buckets */
	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
		u32 val, val2;

		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;
		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
	}
}
1339 
/* Query the WTBL for every station queued on the phy stats list so the
 * current TX rate can be reported. Entries are detached one at a time
 * so the poll lock can be dropped across mt7921_get_wtbl_info(), which
 * issues an MCU command (presumably it can sleep — NOTE(review):
 * confirm), rather than iterating the list in place.
 */
static void
mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mt7921_sta *msta;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&phy->stats_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7921_sta, stats_list);
		list_del_init(&msta->stats_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* query wtbl info to report tx rate for further devices */
		mt7921_get_wtbl_info(dev, msta->wcid.idx);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1363 
/* Periodic per-phy housekeeping, re-armed every MT7921_WATCHDOG_TIME:
 * channel survey on every run, MIB stats on every 2nd run, per-station
 * rate stats on every 4th run.
 */
void mt7921_mac_work(struct work_struct *work)
{
	struct mt7921_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mt7921_mutex_acquire(phy->dev);

	mt76_update_survey(mphy->dev);
	if (++mphy->mac_work_count == 2) {
		mphy->mac_work_count = 0;

		mt7921_mac_update_mib_stats(phy);
	}
	if (++phy->sta_work_count == 4) {
		phy->sta_work_count = 0;
		mt7921_mac_sta_stats_work(phy);
	}

	mt7921_mutex_release(phy->dev);
	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
				     MT7921_WATCHDOG_TIME);
}
1390 
/* Runtime-PM wake handler: take driver ownership of the chip back from
 * the firmware and resume the datapath that was parked while asleep.
 */
void mt7921_pm_wake_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (!mt7921_mcu_drv_pmctrl(dev)) {
		int i;

		/* chip is awake: restart RX, flush PM-deferred TX frames
		 * and re-arm the periodic mac work
		 */
		mt76_for_each_q_rx(&dev->mt76, i)
			napi_schedule(&dev->mt76.napi[i]);
		mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
		mt7921_tx_cleanup(dev);
		ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
					     MT7921_WATCHDOG_TIME);
	}

	/* unblock mac80211 queues and anyone waiting for the wakeup */
	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}
1414 
/* Runtime-PM idle handler: hand device ownership to the firmware once
 * the link has been idle for pm.idle_timeout jiffies. While scanning,
 * or when there was recent activity, the decision is deferred by
 * re-queueing this work.
 */
void mt7921_pm_power_save_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	unsigned long delta;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.ps_work.work);

	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		/* still active: retry when the idle window expires */
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	/* on success the firmware owns the device: no need to re-arm;
	 * on failure fall through and retry after idle_timeout
	 */
	if (!mt7921_mcu_fw_pmctrl(dev))
		return;
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
1438 
1439 int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
1440 				 struct ieee80211_vif *vif,
1441 				 bool enable)
1442 {
1443 	struct mt7921_dev *dev = phy->dev;
1444 	bool ext_phy = phy != &dev->phy;
1445 	int err;
1446 
1447 	if (!dev->pm.enable)
1448 		return -EOPNOTSUPP;
1449 
1450 	err = mt7921_mcu_set_bss_pm(dev, vif, enable);
1451 	if (err)
1452 		return err;
1453 
1454 	if (enable) {
1455 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
1456 		mt76_set(dev, MT_WF_RFCR(ext_phy),
1457 			 MT_WF_RFCR_DROP_OTHER_BEACON);
1458 	} else {
1459 		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
1460 		mt76_clear(dev, MT_WF_RFCR(ext_phy),
1461 			   MT_WF_RFCR_DROP_OTHER_BEACON);
1462 	}
1463 
1464 	return 0;
1465 }
1466 
1467 void mt7921_coredump_work(struct work_struct *work)
1468 {
1469 	struct mt7921_dev *dev;
1470 	char *dump, *data;
1471 
1472 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1473 						coredump.work.work);
1474 
1475 	if (time_is_after_jiffies(dev->coredump.last_activity +
1476 				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
1477 		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
1478 				   MT76_CONNAC_COREDUMP_TIMEOUT);
1479 		return;
1480 	}
1481 
1482 	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
1483 	data = dump;
1484 
1485 	while (true) {
1486 		struct sk_buff *skb;
1487 
1488 		spin_lock_bh(&dev->mt76.lock);
1489 		skb = __skb_dequeue(&dev->coredump.msg_list);
1490 		spin_unlock_bh(&dev->mt76.lock);
1491 
1492 		if (!skb)
1493 			break;
1494 
1495 		skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
1496 		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
1497 			dev_kfree_skb(skb);
1498 			continue;
1499 		}
1500 
1501 		memcpy(data, skb->data, skb->len);
1502 		data += skb->len;
1503 
1504 		dev_kfree_skb(skb);
1505 	}
1506 	dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
1507 		      GFP_KERNEL);
1508 	mt7921_reset(&dev->mt76);
1509 }
1510