/* /openbmc/linux/drivers/net/wireless/mediatek/mt76/mt7915/mac.c (revision 060f35a317ef09101b128f399dce7ed13d019461) */
1  // SPDX-License-Identifier: ISC
2  /* Copyright (C) 2020 MediaTek Inc. */
3  
4  #include <linux/etherdevice.h>
5  #include <linux/timekeeping.h>
6  #include "coredump.h"
7  #include "mt7915.h"
8  #include "../dma.h"
9  #include "mac.h"
10  #include "mcu.h"
11  
12  #define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)
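
/*
 * Worked example (illustrative): the hardware reports RCPI one byte per
 * receive chain. An RCPI byte of 180 decodes to (180 - 220) / 2 = -20 dBm;
 * 160 decodes to -30 dBm.
 */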
13  
14  static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
15  	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
16  	.radar_pattern = {
17  		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
18  		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
19  		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
20  		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
21  		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
22  		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
23  		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { },  54 },
24  		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { },  54 },
25  	},
26  };
27  
28  static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
29  	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
30  	.radar_pattern = {
31  		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
32  		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
33  		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
34  		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
35  		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
36  	},
37  };
38  
39  static const struct mt7915_dfs_radar_spec jp_radar_specs = {
40  	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
41  	.radar_pattern = {
42  		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
43  		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
44  		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
45  		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
46  		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
47  		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
48  		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
49  		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
50  	},
51  };
52  
53  static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
54  					    u16 idx, bool unicast)
55  {
56  	struct mt7915_sta *sta;
57  	struct mt76_wcid *wcid;
58  
59  	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
60  		return NULL;
61  
62  	wcid = rcu_dereference(dev->mt76.wcid[idx]);
63  	if (unicast || !wcid)
64  		return wcid;
65  
66  	if (!wcid->sta)
67  		return NULL;
68  
69  	sta = container_of(wcid, struct mt7915_sta, wcid);
70  	if (!sta->vif)
71  		return NULL;
72  
73  	return &sta->vif->sta.wcid;
74  }
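
/*
 * Illustrative call pattern (a sketch, not part of the driver; the real
 * caller is mt7915_mac_fill_rx() below): wcid entries are RCU-protected,
 * so lookups belong inside an RCU read-side section, and group-addressed
 * frames resolve to the per-vif station entry returned above.
 */
#if 0
	rcu_read_lock();
	wcid = mt7915_rx_get_wcid(dev, idx, unicast);
	if (wcid && wcid->sta) {
		/* account the frame to this station */
	}
	rcu_read_unlock();
#endif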
75  
76  bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
77  {
78  	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
79  		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
80  
81  	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
82  			 0, 5000);
83  }
84  
85  u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
86  {
87  	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
88  		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
89  
90  	return MT_WTBL_LMAC_OFFS(wcid, dw);
91  }
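
/*
 * Illustrative arithmetic: the on-chip WTBL is windowed in groups of 128
 * entries, so for wcid 200 the WDUCR register is programmed with group
 * 200 >> 7 = 1, and MT_WTBL_LMAC_OFFS() then yields the MMIO offset of
 * dword 'dw' within that entry. Callers below read e.g. dword 20 for the
 * airtime counters, dword 7 for rate/GI bits and dword 30 for resp RSSI.
 */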
92  
93  static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
94  {
95  	static const u8 ac_to_tid[] = {
96  		[IEEE80211_AC_BE] = 0,
97  		[IEEE80211_AC_BK] = 1,
98  		[IEEE80211_AC_VI] = 4,
99  		[IEEE80211_AC_VO] = 6
100  	};
101  	struct ieee80211_sta *sta;
102  	struct mt7915_sta *msta;
103  	struct rate_info *rate;
104  	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
105  	LIST_HEAD(sta_poll_list);
106  	int i;
107  
108  	spin_lock_bh(&dev->mt76.sta_poll_lock);
109  	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
110  	spin_unlock_bh(&dev->mt76.sta_poll_lock);
111  
112  	rcu_read_lock();
113  
114  	while (true) {
115  		bool clear = false;
116  		u32 addr, val;
117  		u16 idx;
118  		s8 rssi[4];
119  		u8 bw;
120  
121  		spin_lock_bh(&dev->mt76.sta_poll_lock);
122  		if (list_empty(&sta_poll_list)) {
123  			spin_unlock_bh(&dev->mt76.sta_poll_lock);
124  			break;
125  		}
126  		msta = list_first_entry(&sta_poll_list,
127  					struct mt7915_sta, wcid.poll_list);
128  		list_del_init(&msta->wcid.poll_list);
129  		spin_unlock_bh(&dev->mt76.sta_poll_lock);
130  
131  		idx = msta->wcid.idx;
132  
133  		/* refresh peer's airtime reporting */
134  		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);
135  
136  		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
137  			u32 tx_last = msta->airtime_ac[i];
138  			u32 rx_last = msta->airtime_ac[i + 4];
139  
140  			msta->airtime_ac[i] = mt76_rr(dev, addr);
141  			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
142  
143  			tx_time[i] = msta->airtime_ac[i] - tx_last;
144  			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
145  
146  			if ((tx_last | rx_last) & BIT(30))
147  				clear = true;
148  
149  			addr += 8;
150  		}
151  
152  		if (clear) {
153  			mt7915_mac_wtbl_update(dev, idx,
154  					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
155  			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
156  		}
157  
158  		if (!msta->wcid.sta)
159  			continue;
160  
161  		sta = container_of((void *)msta, struct ieee80211_sta,
162  				   drv_priv);
163  		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
164  			u8 queue = mt76_connac_lmac_mapping(i);
165  			u32 tx_cur = tx_time[queue];
166  			u32 rx_cur = rx_time[queue];
167  			u8 tid = ac_to_tid[i];
168  
169  			if (!tx_cur && !rx_cur)
170  				continue;
171  
172  			ieee80211_sta_register_airtime(sta, tid, tx_cur,
173  						       rx_cur);
174  		}
175  
176  		/*
177  		 * We don't support reading GI info from txs packets.
178  		 * For accurate tx status reporting and AQL improvement,
179  		 * we need to make sure the flags match, so we poll the GI
180  		 * from the per-sta counters directly.
181  		 */
182  		rate = &msta->wcid.rate;
183  		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
184  		val = mt76_rr(dev, addr);
185  
186  		switch (rate->bw) {
187  		case RATE_INFO_BW_160:
188  			bw = IEEE80211_STA_RX_BW_160;
189  			break;
190  		case RATE_INFO_BW_80:
191  			bw = IEEE80211_STA_RX_BW_80;
192  			break;
193  		case RATE_INFO_BW_40:
194  			bw = IEEE80211_STA_RX_BW_40;
195  			break;
196  		default:
197  			bw = IEEE80211_STA_RX_BW_20;
198  			break;
199  		}
200  
201  		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
202  			u8 offs = 24 + 2 * bw;
203  
204  			rate->he_gi = (val & (0x3 << offs)) >> offs;
205  		} else if (rate->flags &
206  			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
207  			if (val & BIT(12 + bw))
208  				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
209  			else
210  				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
211  		}
212  
213  		/* get signal strength of resp frames (CTS/BA/ACK) */
214  		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30);
215  		val = mt76_rr(dev, addr);
216  
217  		rssi[0] = to_rssi(GENMASK(7, 0), val);
218  		rssi[1] = to_rssi(GENMASK(15, 8), val);
219  		rssi[2] = to_rssi(GENMASK(23, 16), val);
220  		rssi[3] = to_rssi(GENMASK(31, 24), val);
221  
222  		msta->ack_signal =
223  			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
224  
225  		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
226  	}
227  
228  	rcu_read_unlock();
229  }
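
/*
 * Note on the airtime snapshot above (informal): the WTBL TX/RX airtime
 * counters are free-running u32 values, so the per-AC deltas are computed
 * with plain unsigned subtraction, which stays correct across a wrap,
 * e.g.:
 *
 *   tx_time = 0x40000123 - 0x3ffffff0 = 0x133 units since the last poll.
 *
 * Once a counter passes BIT(30), 'clear' triggers and the counters are
 * reset via MT_WTBL_UPDATE_ADM_COUNT_CLEAR, restarting the baseline from
 * zero well before an overflow can occur.
 */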
230  
231  void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
232  			      struct ieee80211_vif *vif, bool enable)
233  {
234  	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
235  	u32 addr;
236  
237  	addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
238  	if (enable)
239  		mt76_set(dev, addr, BIT(5));
240  	else
241  		mt76_clear(dev, addr, BIT(5));
242  }
243  
244  static void
245  mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
246  		     struct mt7915_sta *msta, struct sk_buff *skb,
247  		     u32 info)
248  {
249  	struct ieee80211_vif *vif;
250  	struct wireless_dev *wdev;
251  
252  	if (!msta || !msta->vif)
253  		return;
254  
255  	if (!mt76_queue_is_wed_rx(q))
256  		return;
257  
258  	if (!(info & MT_DMA_INFO_PPE_VLD))
259  		return;
260  
261  	vif = container_of((void *)msta->vif, struct ieee80211_vif,
262  			   drv_priv);
263  	wdev = ieee80211_vif_to_wdev(vif);
264  	skb->dev = wdev->netdev;
265  
266  	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
267  				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
268  				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
269  }
270  
271  static int
272  mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
273  		   enum mt76_rxq_id q, u32 *info)
274  {
275  	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
276  	struct mt76_phy *mphy = &dev->mt76.phy;
277  	struct mt7915_phy *phy = &dev->phy;
278  	struct ieee80211_supported_band *sband;
279  	__le32 *rxd = (__le32 *)skb->data;
280  	__le32 *rxv = NULL;
281  	u32 rxd0 = le32_to_cpu(rxd[0]);
282  	u32 rxd1 = le32_to_cpu(rxd[1]);
283  	u32 rxd2 = le32_to_cpu(rxd[2]);
284  	u32 rxd3 = le32_to_cpu(rxd[3]);
285  	u32 rxd4 = le32_to_cpu(rxd[4]);
286  	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
287  	bool unicast, insert_ccmp_hdr = false;
288  	u8 remove_pad, amsdu_info;
289  	u8 mode = 0, qos_ctl = 0;
290  	struct mt7915_sta *msta = NULL;
291  	u32 csum_status = *(u32 *)skb->cb;
292  	bool hdr_trans;
293  	u16 hdr_gap;
294  	u16 seq_ctrl = 0;
295  	__le16 fc = 0;
296  	int idx;
297  
298  	memset(status, 0, sizeof(*status));
299  
300  	if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) {
301  		mphy = dev->mt76.phys[MT_BAND1];
302  		if (!mphy)
303  			return -EINVAL;
304  
305  		phy = mphy->priv;
306  		status->phy_idx = 1;
307  	}
308  
309  	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
310  		return -EINVAL;
311  
312  	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
313  		return -EINVAL;
314  
315  	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
316  	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
317  		return -EINVAL;
318  
319  	/* ICV error or CCMP/BIP/WPI MIC error */
320  	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
321  		status->flag |= RX_FLAG_ONLY_MONITOR;
322  
323  	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
324  	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
325  	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
326  
327  	if (status->wcid) {
328  		msta = container_of(status->wcid, struct mt7915_sta, wcid);
329  		spin_lock_bh(&dev->mt76.sta_poll_lock);
330  		if (list_empty(&msta->wcid.poll_list))
331  			list_add_tail(&msta->wcid.poll_list,
332  				      &dev->mt76.sta_poll_list);
333  		spin_unlock_bh(&dev->mt76.sta_poll_lock);
334  	}
335  
336  	status->freq = mphy->chandef.chan->center_freq;
337  	status->band = mphy->chandef.chan->band;
338  	if (status->band == NL80211_BAND_5GHZ)
339  		sband = &mphy->sband_5g.sband;
340  	else if (status->band == NL80211_BAND_6GHZ)
341  		sband = &mphy->sband_6g.sband;
342  	else
343  		sband = &mphy->sband_2g.sband;
344  
345  	if (!sband->channels)
346  		return -EINVAL;
347  
348  	if ((rxd0 & csum_mask) == csum_mask &&
349  	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
350  		skb->ip_summed = CHECKSUM_UNNECESSARY;
351  
352  	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
353  		status->flag |= RX_FLAG_FAILED_FCS_CRC;
354  
355  	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
356  		status->flag |= RX_FLAG_MMIC_ERROR;
357  
358  	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
359  	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
360  		status->flag |= RX_FLAG_DECRYPTED;
361  		status->flag |= RX_FLAG_IV_STRIPPED;
362  		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
363  	}
364  
365  	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
366  
367  	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
368  		return -EINVAL;
369  
370  	rxd += 6;
371  	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
372  		u32 v0 = le32_to_cpu(rxd[0]);
373  		u32 v2 = le32_to_cpu(rxd[2]);
374  
375  		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
376  		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
377  		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
378  
379  		rxd += 4;
380  		if ((u8 *)rxd - skb->data >= skb->len)
381  			return -EINVAL;
382  	}
383  
384  	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
385  		u8 *data = (u8 *)rxd;
386  
387  		if (status->flag & RX_FLAG_DECRYPTED) {
388  			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
389  			case MT_CIPHER_AES_CCMP:
390  			case MT_CIPHER_CCMP_CCX:
391  			case MT_CIPHER_CCMP_256:
392  				insert_ccmp_hdr =
393  					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
394  				fallthrough;
395  			case MT_CIPHER_TKIP:
396  			case MT_CIPHER_TKIP_NO_MIC:
397  			case MT_CIPHER_GCMP:
398  			case MT_CIPHER_GCMP_256:
399  				status->iv[0] = data[5];
400  				status->iv[1] = data[4];
401  				status->iv[2] = data[3];
402  				status->iv[3] = data[2];
403  				status->iv[4] = data[1];
404  				status->iv[5] = data[0];
405  				break;
406  			default:
407  				break;
408  			}
409  		}
410  		rxd += 4;
411  		if ((u8 *)rxd - skb->data >= skb->len)
412  			return -EINVAL;
413  	}
414  
415  	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
416  		status->timestamp = le32_to_cpu(rxd[0]);
417  		status->flag |= RX_FLAG_MACTIME_START;
418  
419  		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
420  			status->flag |= RX_FLAG_AMPDU_DETAILS;
421  
422  			/* all subframes of an A-MPDU have the same timestamp */
423  			if (phy->rx_ampdu_ts != status->timestamp) {
424  				if (!++phy->ampdu_ref)
425  					phy->ampdu_ref++;
426  			}
427  			phy->rx_ampdu_ts = status->timestamp;
428  
429  			status->ampdu_ref = phy->ampdu_ref;
430  		}
431  
432  		rxd += 2;
433  		if ((u8 *)rxd - skb->data >= skb->len)
434  			return -EINVAL;
435  	}
436  
437  	/* RXD Group 3 - P-RXV */
438  	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
439  		u32 v0, v1;
440  		int ret;
441  
442  		rxv = rxd;
443  		rxd += 2;
444  		if ((u8 *)rxd - skb->data >= skb->len)
445  			return -EINVAL;
446  
447  		v0 = le32_to_cpu(rxv[0]);
448  		v1 = le32_to_cpu(rxv[1]);
449  
450  		if (v0 & MT_PRXV_HT_AD_CODE)
451  			status->enc_flags |= RX_ENC_FLAG_LDPC;
452  
453  		status->chains = mphy->antenna_mask;
454  		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
455  		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
456  		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
457  		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
458  
459  		/* RXD Group 5 - C-RXV */
460  		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
461  			rxd += 18;
462  			if ((u8 *)rxd - skb->data >= skb->len)
463  				return -EINVAL;
464  		}
465  
466  		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
467  			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
468  							    sband, rxv, &mode);
469  			if (ret < 0)
470  				return ret;
471  		}
472  	}
473  
474  	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
475  	status->amsdu = !!amsdu_info;
476  	if (status->amsdu) {
477  		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
478  		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
479  	}
480  
481  	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
482  	if (hdr_trans && ieee80211_has_morefrags(fc)) {
483  		struct ieee80211_vif *vif;
484  		int err;
485  
486  		if (!msta || !msta->vif)
487  			return -EINVAL;
488  
489  		vif = container_of((void *)msta->vif, struct ieee80211_vif,
490  				   drv_priv);
491  		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
492  		if (err)
493  			return err;
494  
495  		hdr_trans = false;
496  	} else {
497  		int pad_start = 0;
498  
499  		skb_pull(skb, hdr_gap);
500  		if (!hdr_trans && status->amsdu) {
501  			pad_start = ieee80211_get_hdrlen_from_skb(skb);
502  		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
503  			/*
504  			 * When header translation failure is indicated,
505  			 * the hardware will insert an extra 2-byte field
506  			 * containing the data length after the protocol
507  			 * type field. This happens either when the LLC-SNAP
508  			 * pattern did not match, or if a VLAN header was
509  			 * detected.
510  			 */
511  			pad_start = 12;
512  			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
513  				pad_start += 4;
514  			else
515  				pad_start = 0;
516  		}
517  
518  		if (pad_start) {
519  			memmove(skb->data + 2, skb->data, pad_start);
520  			skb_pull(skb, 2);
521  		}
522  	}
523  
524  	if (!hdr_trans) {
525  		struct ieee80211_hdr *hdr;
526  
527  		if (insert_ccmp_hdr) {
528  			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
529  
530  			mt76_insert_ccmp_hdr(skb, key_id);
531  		}
532  
533  		hdr = mt76_skb_get_hdr(skb);
534  		fc = hdr->frame_control;
535  		if (ieee80211_is_data_qos(fc)) {
536  			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
537  			qos_ctl = *ieee80211_get_qos_ctl(hdr);
538  		}
539  	} else {
540  		status->flag |= RX_FLAG_8023;
541  		mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
542  				     *info);
543  	}
544  
545  	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
546  		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
547  
548  	if (!status->wcid || !ieee80211_is_data_qos(fc))
549  		return 0;
550  
551  	status->aggr = unicast &&
552  		       !ieee80211_is_qos_nullfunc(fc);
553  	status->qos_ctl = qos_ctl;
554  	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
555  
556  	return 0;
557  }
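
/*
 * Informal summary of the rxd walk above (as implemented here, not a
 * hardware spec): the fixed descriptor part is 6 dwords, followed by
 * optional groups whose presence is flagged in rxd1:
 *
 *   GROUP_4 - 4 dwords, 802.11 header fields saved before translation
 *   GROUP_1 - 4 dwords, security IV/PN material
 *   GROUP_2 - 2 dwords, timestamp used for A-MPDU reference tracking
 *   GROUP_3 - 2 dwords, P-RXV (rate/RSSI vector)
 *   GROUP_5 - 18 dwords, C-RXV (parsed for rates on non-7915 or when present)
 *
 * Each step re-checks that rxd still points inside the skb before
 * continuing.
 */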
558  
559  static void
560  mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
561  {
562  #ifdef CONFIG_NL80211_TESTMODE
563  	struct mt7915_phy *phy = &dev->phy;
564  	__le32 *rxd = (__le32 *)skb->data;
565  	__le32 *rxv_hdr = rxd + 2;
566  	__le32 *rxv = rxd + 4;
567  	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
568  	u8 band_idx;
569  	s32 foe;
570  	u8 snr;
571  	int i;
572  
573  	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
574  	if (band_idx && !phy->mt76->band_idx) {
575  		phy = mt7915_ext_phy(dev);
576  		if (!phy)
577  			goto out;
578  	}
579  
580  	rcpi = le32_to_cpu(rxv[6]);
581  	ib_rssi = le32_to_cpu(rxv[7]);
582  	wb_rssi = le32_to_cpu(rxv[8]) >> 5;
583  
584  	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
585  		if (i == 3)
586  			wb_rssi = le32_to_cpu(rxv[9]);
587  
588  		phy->test.last_rcpi[i] = rcpi & 0xff;
589  		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
590  		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
591  	}
592  
593  	v20 = le32_to_cpu(rxv[20]);
594  	v21 = le32_to_cpu(rxv[21]);
595  
596  	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
597  	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);
598  
599  	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;
600  
601  	phy->test.last_freq_offset = foe;
602  	phy->test.last_snr = snr;
603  out:
604  #endif
605  	dev_kfree_skb(skb);
606  }
607  
608  static void
609  mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
610  			 struct sk_buff *skb)
611  {
612  #ifdef CONFIG_NL80211_TESTMODE
613  	struct mt76_testmode_data *td = &phy->mt76->test;
614  	const struct ieee80211_rate *r;
615  	u8 bw, mode, nss = td->tx_rate_nss;
616  	u8 rate_idx = td->tx_rate_idx;
617  	u16 rateval = 0;
618  	u32 val;
619  	bool cck = false;
620  	int band;
621  
622  	if (skb != phy->mt76->test.tx_skb)
623  		return;
624  
625  	switch (td->tx_rate_mode) {
626  	case MT76_TM_TX_MODE_HT:
627  		nss = 1 + (rate_idx >> 3);
628  		mode = MT_PHY_TYPE_HT;
629  		break;
630  	case MT76_TM_TX_MODE_VHT:
631  		mode = MT_PHY_TYPE_VHT;
632  		break;
633  	case MT76_TM_TX_MODE_HE_SU:
634  		mode = MT_PHY_TYPE_HE_SU;
635  		break;
636  	case MT76_TM_TX_MODE_HE_EXT_SU:
637  		mode = MT_PHY_TYPE_HE_EXT_SU;
638  		break;
639  	case MT76_TM_TX_MODE_HE_TB:
640  		mode = MT_PHY_TYPE_HE_TB;
641  		break;
642  	case MT76_TM_TX_MODE_HE_MU:
643  		mode = MT_PHY_TYPE_HE_MU;
644  		break;
645  	case MT76_TM_TX_MODE_CCK:
646  		cck = true;
647  		fallthrough;
648  	case MT76_TM_TX_MODE_OFDM:
649  		band = phy->mt76->chandef.chan->band;
650  		if (band == NL80211_BAND_2GHZ && !cck)
651  			rate_idx += 4;
652  
653  		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
654  		val = cck ? r->hw_value_short : r->hw_value;
655  
656  		mode = val >> 8;
657  		rate_idx = val & 0xff;
658  		break;
659  	default:
660  		mode = MT_PHY_TYPE_OFDM;
661  		break;
662  	}
663  
664  	switch (phy->mt76->chandef.width) {
665  	case NL80211_CHAN_WIDTH_40:
666  		bw = 1;
667  		break;
668  	case NL80211_CHAN_WIDTH_80:
669  		bw = 2;
670  		break;
671  	case NL80211_CHAN_WIDTH_80P80:
672  	case NL80211_CHAN_WIDTH_160:
673  		bw = 3;
674  		break;
675  	default:
676  		bw = 0;
677  		break;
678  	}
679  
680  	if (td->tx_rate_stbc && nss == 1) {
681  		nss++;
682  		rateval |= MT_TX_RATE_STBC;
683  	}
684  
685  	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
686  		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
687  		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);
688  
689  	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
690  
691  	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
692  	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
693  		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
694  
695  	val = MT_TXD6_FIXED_BW |
696  	      FIELD_PREP(MT_TXD6_BW, bw) |
697  	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
698  	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);
699  
700  	/* for HE_SU/HE_EXT_SU PPDU
701  	 * - 1x, 2x, 4x LTF + 0.8us GI
702  	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
703  	 * for HE_MU PPDU
704  	 * - 2x, 4x LTF + 0.8us GI
705  	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
706  	 * for HE_TB PPDU
707  	 * - 1x, 2x LTF + 1.6us GI
708  	 * - 4x LTF + 3.2us GI
709  	 */
710  	if (mode >= MT_PHY_TYPE_HE_SU)
711  		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
712  
713  	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
714  		val |= MT_TXD6_LDPC;
715  
716  	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
717  	txwi[6] |= cpu_to_le32(val);
718  	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
719  					  phy->test.spe_idx));
720  #endif
721  }
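
/*
 * Worked example for the rate packing above (illustrative): in testmode
 * HT with tx_rate_idx = 11, nss = 1 + (11 >> 3) = 2 streams, and rateval
 * is assembled as IDX = 11 | MODE = MT_PHY_TYPE_HT | NSS = (2 - 1).
 * With tx_rate_stbc set on a single-stream rate, nss is bumped to 2 and
 * MT_TX_RATE_STBC is OR'ed in instead.
 */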
722  
723  void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
724  			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
725  			   struct ieee80211_key_conf *key,
726  			   enum mt76_txq_id qid, u32 changed)
727  {
728  	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
729  	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
730  	struct mt76_phy *mphy = &dev->phy;
731  
732  	if (phy_idx && dev->phys[MT_BAND1])
733  		mphy = dev->phys[MT_BAND1];
734  
735  	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
736  
737  	if (mt76_testmode_enabled(mphy))
738  		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
739  }
740  
741  int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
742  			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
743  			  struct ieee80211_sta *sta,
744  			  struct mt76_tx_info *tx_info)
745  {
746  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
747  	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
748  	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
749  	struct ieee80211_key_conf *key = info->control.hw_key;
750  	struct ieee80211_vif *vif = info->control.vif;
751  	struct mt76_connac_fw_txp *txp;
752  	struct mt76_txwi_cache *t;
753  	int id, i, nbuf = tx_info->nbuf - 1;
754  	u8 *txwi = (u8 *)txwi_ptr;
755  	int pid;
756  
757  	if (unlikely(tx_info->skb->len <= ETH_HLEN))
758  		return -EINVAL;
759  
760  	if (!wcid)
761  		wcid = &dev->mt76.global_wcid;
762  
763  	if (sta) {
764  		struct mt7915_sta *msta;
765  
766  		msta = (struct mt7915_sta *)sta->drv_priv;
767  
768  		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
769  			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
770  			msta->jiffies = jiffies;
771  		}
772  	}
773  
774  	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
775  	t->skb = tx_info->skb;
776  
777  	id = mt76_token_consume(mdev, &t);
778  	if (id < 0)
779  		return id;
780  
781  	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
782  	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
783  			      qid, 0);
784  
785  	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
786  	for (i = 0; i < nbuf; i++) {
787  		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
788  		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
789  	}
790  	txp->nbuf = nbuf;
791  
792  	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);
793  
794  	if (!key)
795  		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
796  
797  	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
798  	    ieee80211_is_mgmt(hdr->frame_control))
799  		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
800  
801  	if (vif) {
802  		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
803  
804  		txp->bss_idx = mvif->mt76.idx;
805  	}
806  
807  	txp->token = cpu_to_le16(id);
808  	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
809  		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
810  	else
811  		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
812  	tx_info->skb = NULL;
813  
814  	/* pass partial skb header to fw */
815  	tx_info->buf[1].len = MT_CT_PARSE_LEN;
816  	tx_info->buf[1].skip_unmap = true;
817  	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
818  
819  	return 0;
820  }
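
/*
 * Layout note (informal): in cut-through (CT) mode the first DMA buffer
 * carries the TXD plus the mt76_connac_fw_txp descriptor built above,
 * while buf[1] maps only the first MT_CT_PARSE_LEN bytes of the frame so
 * the firmware can parse the 802.11 header; the full payload buffers are
 * chained through txp->buf[]/txp->len[] and looked up again by token id
 * when the tx-free event comes back.
 */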
821  
822  u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
823  {
824  	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
825  	__le32 *txwi = ptr;
826  	u32 val;
827  
828  	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
829  
830  	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
831  	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
832  	txwi[0] = cpu_to_le32(val);
833  
834  	val = MT_TXD1_LONG_FORMAT |
835  	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
836  	txwi[1] = cpu_to_le32(val);
837  
838  	txp->token = cpu_to_le16(token_id);
839  	txp->nbuf = 1;
840  	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
841  
842  	return MT_TXD_SIZE + sizeof(*txp);
843  }
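
/*
 * Resulting buffer layout (sketch): mt7915_wed_init_buf() pre-builds a
 * minimal 802.3 CT descriptor for WED-managed tx buffers:
 *
 *   phys + 0                               TXD (MT_TXD_SIZE bytes)
 *   phys + MT_TXD_SIZE                     struct mt76_connac_fw_txp
 *   phys + MT_TXD_SIZE + sizeof(*txp)      frame data (txp->buf[0])
 *
 * The returned value is the offset of the frame data within the buffer.
 */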
844  
845  static void
846  mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
847  {
848  	struct mt76_dev *mdev = &dev->mt76;
849  	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
850  
851  	/* clean DMA queues and unmap buffers first */
852  	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
853  	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
854  	if (mphy_ext) {
855  		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
856  		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
857  	}
858  }
859  
860  static void
861  mt7915_mac_tx_free_done(struct mt7915_dev *dev,
862  			struct list_head *free_list, bool wake)
863  {
864  	struct sk_buff *skb, *tmp;
865  
866  	mt7915_mac_sta_poll(dev);
867  
868  	if (wake)
869  		mt76_set_tx_blocked(&dev->mt76, false);
870  
871  	mt76_worker_schedule(&dev->mt76.tx_worker);
872  
873  	list_for_each_entry_safe(skb, tmp, free_list, list) {
874  		skb_list_del_init(skb);
875  		napi_consume_skb(skb, 1);
876  	}
877  }
878  
879  static void
880  mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
881  {
882  	struct mt76_connac_tx_free *free = data;
883  	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
884  	struct mt76_dev *mdev = &dev->mt76;
885  	struct mt76_txwi_cache *txwi;
886  	struct ieee80211_sta *sta = NULL;
887  	struct mt76_wcid *wcid = NULL;
888  	LIST_HEAD(free_list);
889  	void *end = data + len;
890  	bool v3, wake = false;
891  	u16 total, count = 0;
892  	u32 txd = le32_to_cpu(free->txd);
893  	__le32 *cur_info;
894  
895  	mt7915_mac_tx_free_prepare(dev);
896  
897  	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
898  	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
899  
900  	for (cur_info = tx_info; count < total; cur_info++) {
901  		u32 msdu, info;
902  		u8 i;
903  
904  		if (WARN_ON_ONCE((void *)cur_info >= end))
905  			return;
906  
907  		/*
908  		 * 1'b1: new wcid pair.
909  		 * 1'b0: msdu_id with the same 'wcid pair' as above.
910  		 */
911  		info = le32_to_cpu(*cur_info);
912  		if (info & MT_TX_FREE_PAIR) {
913  			struct mt7915_sta *msta;
914  			u16 idx;
915  
916  			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
917  			wcid = rcu_dereference(dev->mt76.wcid[idx]);
918  			sta = wcid_to_sta(wcid);
919  			if (!sta)
920  				continue;
921  
922  			msta = container_of(wcid, struct mt7915_sta, wcid);
923  			spin_lock_bh(&mdev->sta_poll_lock);
924  			if (list_empty(&msta->wcid.poll_list))
925  				list_add_tail(&msta->wcid.poll_list,
926  					      &mdev->sta_poll_list);
927  			spin_unlock_bh(&mdev->sta_poll_lock);
928  			continue;
929  		}
930  
931  		if (!mtk_wed_device_active(&mdev->mmio.wed) && wcid) {
932  			u32 tx_retries = 0, tx_failed = 0;
933  
934  			if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) {
935  				tx_retries =
936  					FIELD_GET(MT_TX_FREE_COUNT_V3, info) - 1;
937  				tx_failed = tx_retries +
938  					!!FIELD_GET(MT_TX_FREE_STAT_V3, info);
939  			} else if (!v3 && (info & MT_TX_FREE_MPDU_HEADER)) {
940  				tx_retries =
941  					FIELD_GET(MT_TX_FREE_COUNT, info) - 1;
942  				tx_failed = tx_retries +
943  					!!FIELD_GET(MT_TX_FREE_STAT, info);
944  			}
945  			wcid->stats.tx_retries += tx_retries;
946  			wcid->stats.tx_failed += tx_failed;
947  		}
948  
949  		if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3))
950  			continue;
951  
952  		for (i = 0; i < 1 + v3; i++) {
953  			if (v3) {
954  				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
955  				if (msdu == MT_TX_FREE_MSDU_ID_V3)
956  					continue;
957  			} else {
958  				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
959  			}
960  			count++;
961  			txwi = mt76_token_release(mdev, msdu, &wake);
962  			if (!txwi)
963  				continue;
964  
965  			mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
966  		}
967  	}
968  
969  	mt7915_mac_tx_free_done(dev, &free_list, wake);
970  }
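
/*
 * Packing note for the loop above (informal): in the v3 event format a
 * single info dword carries up to two 15-bit MSDU IDs, extracted as
 * (info >> 0) and (info >> 15) masked with MT_TX_FREE_MSDU_ID_V3; an
 * all-ones ID means "no entry in this slot". Pre-v3 events carry one
 * MSDU ID per dword. Dwords with MT_TX_FREE_PAIR set instead announce
 * the wcid that owns the MSDU IDs which follow.
 */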
971  
972  static void
973  mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
974  {
975  	struct mt76_connac_tx_free *free = data;
976  	__le16 *info = (__le16 *)(data + sizeof(*free));
977  	struct mt76_dev *mdev = &dev->mt76;
978  	void *end = data + len;
979  	LIST_HEAD(free_list);
980  	bool wake = false;
981  	u8 i, count;
982  
983  	mt7915_mac_tx_free_prepare(dev);
984  
985  	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
986  	if (WARN_ON_ONCE((void *)&info[count] > end))
987  		return;
988  
989  	for (i = 0; i < count; i++) {
990  		struct mt76_txwi_cache *txwi;
991  		u16 msdu = le16_to_cpu(info[i]);
992  
993  		txwi = mt76_token_release(mdev, msdu, &wake);
994  		if (!txwi)
995  			continue;
996  
997  		mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list);
998  	}
999  
1000  	mt7915_mac_tx_free_done(dev, &free_list, wake);
1001  }
1002  
1003  static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
1004  {
1005  	struct mt7915_sta *msta = NULL;
1006  	struct mt76_wcid *wcid;
1007  	__le32 *txs_data = data;
1008  	u16 wcidx;
1009  	u8 pid;
1010  
1011  	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1012  	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1013  
1014  	if (pid < MT_PACKET_ID_WED)
1015  		return;
1016  
1017  	if (wcidx >= mt7915_wtbl_size(dev))
1018  		return;
1019  
1020  	rcu_read_lock();
1021  
1022  	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1023  	if (!wcid)
1024  		goto out;
1025  
1026  	msta = container_of(wcid, struct mt7915_sta, wcid);
1027  
1028  	if (pid == MT_PACKET_ID_WED)
1029  		mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
1030  	else
1031  		mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
1032  
1033  	if (!wcid->sta)
1034  		goto out;
1035  
1036  	spin_lock_bh(&dev->mt76.sta_poll_lock);
1037  	if (list_empty(&msta->wcid.poll_list))
1038  		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
1039  	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1040  
1041  out:
1042  	rcu_read_unlock();
1043  }
1044  
1045  bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
1046  {
1047  	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1048  	__le32 *rxd = (__le32 *)data;
1049  	__le32 *end = (__le32 *)&rxd[len / 4];
1050  	enum rx_pkt_type type;
1051  
1052  	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1053  
1054  	switch (type) {
1055  	case PKT_TYPE_TXRX_NOTIFY:
1056  		mt7915_mac_tx_free(dev, data, len);
1057  		return false;
1058  	case PKT_TYPE_TXRX_NOTIFY_V0:
1059  		mt7915_mac_tx_free_v0(dev, data, len);
1060  		return false;
1061  	case PKT_TYPE_TXS:
1062  		for (rxd += 2; rxd + 8 <= end; rxd += 8)
1063  			mt7915_mac_add_txs(dev, rxd);
1064  		return false;
1065  	case PKT_TYPE_RX_FW_MONITOR:
1066  		mt7915_debugfs_rx_fw_monitor(dev, data, len);
1067  		return false;
1068  	default:
1069  		return true;
1070  	}
1071  }
1072  
1073  void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1074  			 struct sk_buff *skb, u32 *info)
1075  {
1076  	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1077  	__le32 *rxd = (__le32 *)skb->data;
1078  	__le32 *end = (__le32 *)&skb->data[skb->len];
1079  	enum rx_pkt_type type;
1080  
1081  	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1082  
1083  	switch (type) {
1084  	case PKT_TYPE_TXRX_NOTIFY:
1085  		mt7915_mac_tx_free(dev, skb->data, skb->len);
1086  		napi_consume_skb(skb, 1);
1087  		break;
1088  	case PKT_TYPE_TXRX_NOTIFY_V0:
1089  		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
1090  		napi_consume_skb(skb, 1);
1091  		break;
1092  	case PKT_TYPE_RX_EVENT:
1093  		mt7915_mcu_rx_event(dev, skb);
1094  		break;
1095  	case PKT_TYPE_TXRXV:
1096  		mt7915_mac_fill_rx_vector(dev, skb);
1097  		break;
1098  	case PKT_TYPE_TXS:
1099  		for (rxd += 2; rxd + 8 <= end; rxd += 8)
1100  			mt7915_mac_add_txs(dev, rxd);
1101  		dev_kfree_skb(skb);
1102  		break;
1103  	case PKT_TYPE_RX_FW_MONITOR:
1104  		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
1105  		dev_kfree_skb(skb);
1106  		break;
1107  	case PKT_TYPE_NORMAL:
1108  		if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
1109  			mt76_rx(&dev->mt76, q, skb);
1110  			return;
1111  		}
1112  		fallthrough;
1113  	default:
1114  		dev_kfree_skb(skb);
1115  		break;
1116  	}
1117  }
1118  
1119  void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
1120  {
1121  	struct mt7915_dev *dev = phy->dev;
1122  	u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx);
1123  
1124  	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
1125  	mt76_set(dev, reg, BIT(11) | BIT(9));
1126  }
1127  
1128  void mt7915_mac_reset_counters(struct mt7915_phy *phy)
1129  {
1130  	struct mt7915_dev *dev = phy->dev;
1131  	int i;
1132  
1133  	for (i = 0; i < 4; i++) {
1134  		mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i));
1135  		mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i));
1136  	}
1137  
1138  	phy->mt76->survey_time = ktime_get_boottime();
1139  	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
1140  
1141  	/* reset airtime counters */
1142  	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx),
1143  		 MT_WF_RMAC_MIB_RXTIME_CLR);
1144  
1145  	mt7915_mcu_get_chan_mib_info(phy, true);
1146  }
1147  
1148  void mt7915_mac_set_timing(struct mt7915_phy *phy)
1149  {
1150  	s16 coverage_class = phy->coverage_class;
1151  	struct mt7915_dev *dev = phy->dev;
1152  	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
1153  	u32 val, reg_offset;
1154  	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1155  		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1156  	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1157  		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
1158  	u8 band = phy->mt76->band_idx;
1159  	int eifs_ofdm = 360, sifs = 10, offset;
1160  	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
1161  
1162  	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
1163  		return;
1164  
1165  	if (ext_phy)
1166  		coverage_class = max_t(s16, dev->phy.coverage_class,
1167  				       ext_phy->coverage_class);
1168  
1169  	mt76_set(dev, MT_ARB_SCR(band),
1170  		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1171  	udelay(1);
1172  
1173  	offset = 3 * coverage_class;
1174  	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1175  		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1176  
1177  	if (!is_mt7915(&dev->mt76)) {
1178  		if (!a_band) {
1179  			mt76_wr(dev, MT_TMAC_ICR1(band),
1180  				FIELD_PREP(MT_IFS_EIFS_CCK, 314));
1181  			eifs_ofdm = 78;
1182  		} else {
1183  			eifs_ofdm = 84;
1184  		}
1185  	} else if (a_band) {
1186  		sifs = 16;
1187  	}
1188  
1189  	mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset);
1190  	mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset);
1191  	mt76_wr(dev, MT_TMAC_ICR0(band),
1192  		FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
1193  		FIELD_PREP(MT_IFS_RIFS, 2) |
1194  		FIELD_PREP(MT_IFS_SIFS, sifs) |
1195  		FIELD_PREP(MT_IFS_SLOT, phy->slottime));
1196  
1197  	if (phy->slottime < 20 || a_band)
1198  		val = MT7915_CFEND_RATE_DEFAULT;
1199  	else
1200  		val = MT7915_CFEND_RATE_11B;
1201  
1202  	mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val);
1203  	mt76_clear(dev, MT_ARB_SCR(band),
1204  		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1205  }
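
/*
 * Worked example (illustrative): the coverage class stretches the
 * CCA/PLCP timeouts by 3 us per class, so with coverage_class = 10 the
 * code above adds offset = 30 us to both the CCK (CDTR) and OFDM (ODTR)
 * timeout registers. On a DBDC chip the larger of the two phys'
 * coverage classes wins, since both bands share the timing block.
 */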
1206  
1207  void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band)
1208  {
1209  	u32 reg;
1210  
1211  	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) :
1212  				      MT_WF_PHY_RXTD12_MT7916(band);
1213  	mt76_set(dev, reg,
1214  		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
1215  		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);
1216  
1217  	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) :
1218  				      MT_WF_PHY_RX_CTRL1_MT7916(band);
1219  	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
1220  }
1221  
1222  static u8
1223  mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
1224  {
1225  	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1226  	struct mt7915_dev *dev = phy->dev;
1227  	u32 val, sum = 0, n = 0;
1228  	int nss, i;
1229  
1230  	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
1231  		u32 reg = is_mt7915(&dev->mt76) ?
1232  			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
1233  			MT_WF_IRPI_NSS_MT7916(idx, nss);
1234  
1235  		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1236  			val = mt76_rr(dev, reg);
1237  			sum += val * nf_power[i];
1238  			n += val;
1239  		}
1240  	}
1241  
1242  	if (!n)
1243  		return 0;
1244  
1245  	return sum / n;
1246  }
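
/*
 * Worked example (illustrative): the IRPI registers form a histogram of
 * idle-power samples whose bins map to the dBm magnitudes in nf_power[].
 * If only two bins are populated, say 3 samples at -92 dBm and 1 sample
 * at -80 dBm, the function returns (3 * 92 + 1 * 80) / 4 = 89, i.e. an
 * average noise floor of roughly -89 dBm.
 */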
1247  
1248  void mt7915_update_channel(struct mt76_phy *mphy)
1249  {
1250  	struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
1251  	struct mt76_channel_state *state = mphy->chan_state;
1252  	int nf;
1253  
1254  	mt7915_mcu_get_chan_mib_info(phy, false);
1255  
1256  	nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx);
1257  	if (!phy->noise)
1258  		phy->noise = nf << 4;
1259  	else if (nf)
1260  		phy->noise += nf - (phy->noise >> 4);
1261  
1262  	state->noise = -(phy->noise >> 4);
1263  }
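
/*
 * Fixed-point note (informal): phy->noise holds the noise-floor
 * magnitude as a Q4 value (dB << 4). The update
 * noise += nf - (noise >> 4) is a 1/16-weight exponential moving
 * average: starting from 92 << 4 with a new sample of 89, the stored
 * value moves by 89 - 92 = -3 Q4 steps (3/16 dB) toward the new
 * reading, and state->noise reports -(noise >> 4) dBm.
 */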
1264  
1265  static bool
1266  mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
1267  {
1268  	bool ret;
1269  
1270  	ret = wait_event_timeout(dev->reset_wait,
1271  				 (READ_ONCE(dev->recovery.state) & state),
1272  				 MT7915_RESET_TIMEOUT);
1273  
1274  	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1275  	return ret;
1276  }
1277  
1278  static void
1279  mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1280  {
1281  	struct ieee80211_hw *hw = priv;
1282  
1283  	switch (vif->type) {
1284  	case NL80211_IFTYPE_MESH_POINT:
1285  	case NL80211_IFTYPE_ADHOC:
1286  	case NL80211_IFTYPE_AP:
1287  		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
1288  				      BSS_CHANGED_BEACON_ENABLED);
1289  		break;
1290  	default:
1291  		break;
1292  	}
1293  }
1294  
1295  static void
1296  mt7915_update_beacons(struct mt7915_dev *dev)
1297  {
1298  	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
1299  
1300  	ieee80211_iterate_active_interfaces(dev->mt76.hw,
1301  		IEEE80211_IFACE_ITER_RESUME_ALL,
1302  		mt7915_update_vif_beacon, dev->mt76.hw);
1303  
1304  	if (!mphy_ext)
1305  		return;
1306  
1307  	ieee80211_iterate_active_interfaces(mphy_ext->hw,
1308  		IEEE80211_IFACE_ITER_RESUME_ALL,
1309  		mt7915_update_vif_beacon, mphy_ext->hw);
1310  }
1311  
1312  static int
1313  mt7915_mac_restart(struct mt7915_dev *dev)
1314  {
1315  	struct mt7915_phy *phy2;
1316  	struct mt76_phy *ext_phy;
1317  	struct mt76_dev *mdev = &dev->mt76;
1318  	int i, ret;
1319  
1320  	ext_phy = dev->mt76.phys[MT_BAND1];
1321  	phy2 = ext_phy ? ext_phy->priv : NULL;
1322  
1323  	if (dev->hif2) {
1324  		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
1325  		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1326  	}
1327  
1328  	if (dev_is_pci(mdev->dev)) {
1329  		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
1330  		if (dev->hif2) {
1331  			if (is_mt7915(mdev))
1332  				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
1333  			else
1334  				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0x0);
1335  		}
1336  	}
1337  
1338  	set_bit(MT76_RESET, &dev->mphy.state);
1339  	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1340  	wake_up(&dev->mt76.mcu.wait);
1341  	if (ext_phy) {
1342  		set_bit(MT76_RESET, &ext_phy->state);
1343  		set_bit(MT76_MCU_RESET, &ext_phy->state);
1344  	}
1345  
1346  	/* lock/unlock all queues to ensure that no tx is pending */
1347  	mt76_txq_schedule_all(&dev->mphy);
1348  	if (ext_phy)
1349  		mt76_txq_schedule_all(ext_phy);
1350  
1351  	/* disable all tx/rx napi */
1352  	mt76_worker_disable(&dev->mt76.tx_worker);
1353  	mt76_for_each_q_rx(mdev, i) {
1354  		if (mdev->q_rx[i].ndesc)
1355  			napi_disable(&dev->mt76.napi[i]);
1356  	}
1357  	napi_disable(&dev->mt76.tx_napi);
1358  
1359  	/* token reinit */
1360  	mt76_connac2_tx_token_put(&dev->mt76);
1361  	idr_init(&dev->mt76.token);
1362  
1363  	mt7915_dma_reset(dev, true);
1364  
1365  	local_bh_disable();
1366  	mt76_for_each_q_rx(mdev, i) {
1367  		if (mdev->q_rx[i].ndesc) {
1368  			napi_enable(&dev->mt76.napi[i]);
1369  			napi_schedule(&dev->mt76.napi[i]);
1370  		}
1371  	}
1372  	local_bh_enable();
1373  	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1374  	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
1375  
1376  	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
1377  	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
1378  
1379  	if (dev->hif2) {
1380  		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
1381  		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1382  	}
1383  	if (dev_is_pci(mdev->dev)) {
1384  		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
1385  		if (dev->hif2) {
1386  			mt76_wr(dev, MT_PCIE_RECOG_ID,
1387  				dev->hif2->index | MT_PCIE_RECOG_ID_SEM);
1388  			if (is_mt7915(mdev))
1389  				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
1390  			else
1391  				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
1392  		}
1393  	}
1394  
1395  	/* load firmware */
1396  	ret = mt7915_mcu_init_firmware(dev);
1397  	if (ret)
1398  		goto out;
1399  
1400  	/* set the necessary init items */
1401  	ret = mt7915_mcu_set_eeprom(dev);
1402  	if (ret)
1403  		goto out;
1404  
1405  	mt7915_mac_init(dev);
1406  	mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
1407  	mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
1408  	ret = mt7915_txbf_init(dev);
1409  
1410  	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
1411  		ret = mt7915_run(dev->mphy.hw);
1412  		if (ret)
1413  			goto out;
1414  	}
1415  
1416  	if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
1417  		ret = mt7915_run(ext_phy->hw);
1418  		if (ret)
1419  			goto out;
1420  	}
1421  
1422  out:
1423  	/* reset done */
1424  	clear_bit(MT76_RESET, &dev->mphy.state);
1425  	if (phy2)
1426  		clear_bit(MT76_RESET, &phy2->mt76->state);
1427  
1428  	local_bh_disable();
1429  	napi_enable(&dev->mt76.tx_napi);
1430  	napi_schedule(&dev->mt76.tx_napi);
1431  	local_bh_enable();
1432  
1433  	mt76_worker_enable(&dev->mt76.tx_worker);
1434  
1435  	return ret;
1436  }
1437  
1438  static void
1439  mt7915_mac_full_reset(struct mt7915_dev *dev)
1440  {
1441  	struct mt76_phy *ext_phy;
1442  	struct mt7915_phy *phy2;
1443  	int i;
1444  
1445  	ext_phy = dev->mt76.phys[MT_BAND1];
1446  	phy2 = ext_phy ? ext_phy->priv : NULL;
1447  
1448  	dev->recovery.hw_full_reset = true;
1449  
1450  	wake_up(&dev->mt76.mcu.wait);
1451  	ieee80211_stop_queues(mt76_hw(dev));
1452  	if (ext_phy)
1453  		ieee80211_stop_queues(ext_phy->hw);
1454  
1455  	cancel_delayed_work_sync(&dev->mphy.mac_work);
1456  	if (ext_phy)
1457  		cancel_delayed_work_sync(&ext_phy->mac_work);
1458  
1459  	mutex_lock(&dev->mt76.mutex);
1460  	for (i = 0; i < 10; i++) {
1461  		if (!mt7915_mac_restart(dev))
1462  			break;
1463  	}
1464  
1465  	if (i == 10)
1466  		dev_err(dev->mt76.dev, "chip full reset failed\n");
1467  
1468  	spin_lock_bh(&dev->mt76.sta_poll_lock);
1469  	while (!list_empty(&dev->mt76.sta_poll_list))
1470  		list_del_init(dev->mt76.sta_poll_list.next);
1471  	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1472  
1473  	memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
1474  	dev->mt76.vif_mask = 0;
1475  	dev->phy.omac_mask = 0;
1476  	if (phy2)
1477  		phy2->omac_mask = 0;
1478  
1479  	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
1480  	dev->mt76.global_wcid.idx = i;
1481  	dev->recovery.hw_full_reset = false;
1482  
1483  	mutex_unlock(&dev->mt76.mutex);
1484  
1485  	ieee80211_restart_hw(mt76_hw(dev));
1486  	if (ext_phy)
1487  		ieee80211_restart_hw(ext_phy->hw);
1488  }
1489  
1490  /* system error recovery */
1491  void mt7915_mac_reset_work(struct work_struct *work)
1492  {
1493  	struct mt7915_phy *phy2;
1494  	struct mt76_phy *ext_phy;
1495  	struct mt7915_dev *dev;
1496  	int i;
1497  
1498  	dev = container_of(work, struct mt7915_dev, reset_work);
1499  	ext_phy = dev->mt76.phys[MT_BAND1];
1500  	phy2 = ext_phy ? ext_phy->priv : NULL;
1501  
1502  	/* chip full reset */
1503  	if (dev->recovery.restart) {
1504  		/* disable WA/WM WDT */
1505  		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
1506  			   MT_MCU_CMD_WDT_MASK);
1507  
1508  		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
1509  			dev->recovery.wa_reset_count++;
1510  		else
1511  			dev->recovery.wm_reset_count++;
1512  
1513  		mt7915_mac_full_reset(dev);
1514  
1515  		/* enable mcu irq */
1516  		mt7915_irq_enable(dev, MT_INT_MCU_CMD);
1517  		mt7915_irq_disable(dev, 0);
1518  
1519  		/* enable WA/WM WDT */
1520  		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
1521  
1522  		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
1523  		dev->recovery.restart = false;
1524  		return;
1525  	}
1526  
1527  	/* chip partial reset */
1528  	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
1529  		return;
1530  
1531  	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
1532  		mtk_wed_device_stop(&dev->mt76.mmio.wed);
1533  		if (!is_mt798x(&dev->mt76))
1534  			mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
1535  	}
1536  
1537  	ieee80211_stop_queues(mt76_hw(dev));
1538  	if (ext_phy)
1539  		ieee80211_stop_queues(ext_phy->hw);
1540  
1541  	set_bit(MT76_RESET, &dev->mphy.state);
1542  	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1543  	wake_up(&dev->mt76.mcu.wait);
1544  	cancel_delayed_work_sync(&dev->mphy.mac_work);
1545  	if (phy2) {
1546  		set_bit(MT76_RESET, &phy2->mt76->state);
1547  		cancel_delayed_work_sync(&phy2->mt76->mac_work);
1548  	}
1549  
1550  	mutex_lock(&dev->mt76.mutex);
1551  
1552  	mt76_worker_disable(&dev->mt76.tx_worker);
1553  	mt76_for_each_q_rx(&dev->mt76, i)
1554  		napi_disable(&dev->mt76.napi[i]);
1555  	napi_disable(&dev->mt76.tx_napi);
1556  
1557  
1558  	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
1559  
1560  	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
1561  		mt7915_dma_reset(dev, false);
1562  
1563  		mt76_connac2_tx_token_put(&dev->mt76);
1564  		idr_init(&dev->mt76.token);
1565  
1566  		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
1567  		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
1568  	}
1569  
1570  	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
1571  	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
1572  
1573  	/* enable DMA Tx/Rx and interrupt */
1574  	mt7915_dma_start(dev, false, false);
1575  
1576  	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1577  	clear_bit(MT76_RESET, &dev->mphy.state);
1578  	if (phy2)
1579  		clear_bit(MT76_RESET, &phy2->mt76->state);
1580  
1581  	local_bh_disable();
1582  	mt76_for_each_q_rx(&dev->mt76, i) {
1583  		napi_enable(&dev->mt76.napi[i]);
1584  		napi_schedule(&dev->mt76.napi[i]);
1585  	}
1586  	local_bh_enable();
1587  
1588  	tasklet_schedule(&dev->mt76.irq_tasklet);
1589  
1590  	mt76_worker_enable(&dev->mt76.tx_worker);
1591  
1592  	local_bh_disable();
1593  	napi_enable(&dev->mt76.tx_napi);
1594  	napi_schedule(&dev->mt76.tx_napi);
1595  	local_bh_enable();
1596  
1597  	ieee80211_wake_queues(mt76_hw(dev));
1598  	if (ext_phy)
1599  		ieee80211_wake_queues(ext_phy->hw);
1600  
1601  	mutex_unlock(&dev->mt76.mutex);
1602  
1603  	mt7915_update_beacons(dev);
1604  
1605  	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1606  				     MT7915_WATCHDOG_TIME);
1607  	if (phy2)
1608  		ieee80211_queue_delayed_work(ext_phy->hw,
1609  					     &phy2->mt76->mac_work,
1610  					     MT7915_WATCHDOG_TIME);
1611  }
1612  
1613  /* firmware coredump */
1614  void mt7915_mac_dump_work(struct work_struct *work)
1615  {
1616  	const struct mt7915_mem_region *mem_region;
1617  	struct mt7915_crash_data *crash_data;
1618  	struct mt7915_dev *dev;
1619  	struct mt7915_mem_hdr *hdr;
1620  	size_t buf_len;
1621  	int i;
1622  	u32 num;
1623  	u8 *buf;
1624  
1625  	dev = container_of(work, struct mt7915_dev, dump_work);
1626  
1627  	mutex_lock(&dev->dump_mutex);
1628  
1629  	crash_data = mt7915_coredump_new(dev);
1630  	if (!crash_data) {
1631  		mutex_unlock(&dev->dump_mutex);
1632  		goto skip_coredump;
1633  	}
1634  
1635  	mem_region = mt7915_coredump_get_mem_layout(dev, &num);
1636  	if (!mem_region || !crash_data->memdump_buf_len) {
1637  		mutex_unlock(&dev->dump_mutex);
1638  		goto skip_memdump;
1639  	}
1640  
1641  	buf = crash_data->memdump_buf;
1642  	buf_len = crash_data->memdump_buf_len;
1643  
1644  	/* dumping memory content... */
1645  	memset(buf, 0, buf_len);
1646  	for (i = 0; i < num; i++) {
1647  		if (mem_region->len > buf_len) {
1648  			dev_warn(dev->mt76.dev, "%s len %lu is too large\n",
1649  				 mem_region->name,
1650  				 (unsigned long)mem_region->len);
1651  			break;
1652  		}
1653  
1654  		/* reserve space for the header */
1655  		hdr = (void *)buf;
1656  		buf += sizeof(*hdr);
1657  		buf_len -= sizeof(*hdr);
1658  
1659  		mt7915_memcpy_fromio(dev, buf, mem_region->start,
1660  				     mem_region->len);
1661  
1662  		hdr->start = mem_region->start;
1663  		hdr->len = mem_region->len;
1664  
1665  		if (!mem_region->len)
1666  			/* note: the header remains, just with zero length */
1667  			break;
1668  
1669  		buf += mem_region->len;
1670  		buf_len -= mem_region->len;
1671  
1672  		mem_region++;
1673  	}
1674  
1675  	mutex_unlock(&dev->dump_mutex);
1676  
1677  skip_memdump:
1678  	mt7915_coredump_submit(dev);
1679  skip_coredump:
1680  	queue_work(dev->mt76.wq, &dev->reset_work);
1681  }
1682  
1683  void mt7915_reset(struct mt7915_dev *dev)
1684  {
1685  	if (!dev->recovery.hw_init_done)
1686  		return;
1687  
1688  	if (dev->recovery.hw_full_reset)
1689  		return;
1690  
1691  	/* wm/wa exception: do full recovery */
1692  	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
1693  		dev->recovery.restart = true;
1694  		dev_info(dev->mt76.dev,
1695  			 "%s indicated firmware crash, attempting recovery\n",
1696  			 wiphy_name(dev->mt76.hw->wiphy));
1697  
1698  		mt7915_irq_disable(dev, MT_INT_MCU_CMD);
1699  		queue_work(dev->mt76.wq, &dev->dump_work);
1700  		return;
1701  	}
1702  
1703  	queue_work(dev->mt76.wq, &dev->reset_work);
1704  	wake_up(&dev->reset_wait);
1705  }
1706  
1707  void mt7915_mac_update_stats(struct mt7915_phy *phy)
1708  {
1709  	struct mt76_mib_stats *mib = &phy->mib;
1710  	struct mt7915_dev *dev = phy->dev;
1711  	int i, aggr0 = 0, aggr1, cnt;
1712  	u8 band = phy->mt76->band_idx;
1713  	u32 val;
1714  
1715  	cnt = mt76_rr(dev, MT_MIB_SDR3(band));
1716  	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
1717  		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
1718  		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
1719  
1720  	cnt = mt76_rr(dev, MT_MIB_SDR4(band));
1721  	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
1722  
1723  	cnt = mt76_rr(dev, MT_MIB_SDR5(band));
1724  	mib->rx_mpdu_cnt += cnt;
1725  
1726  	cnt = mt76_rr(dev, MT_MIB_SDR6(band));
1727  	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
1728  
1729  	cnt = mt76_rr(dev, MT_MIB_SDR7(band));
1730  	mib->rx_vector_mismatch_cnt +=
1731  		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
1732  
1733  	cnt = mt76_rr(dev, MT_MIB_SDR8(band));
1734  	mib->rx_delimiter_fail_cnt +=
1735  		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
1736  
1737  	cnt = mt76_rr(dev, MT_MIB_SDR10(band));
1738  	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
1739  		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
1740  		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
1741  
1742  	cnt = mt76_rr(dev, MT_MIB_SDR11(band));
1743  	mib->rx_len_mismatch_cnt +=
1744  		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
1745  
1746  	cnt = mt76_rr(dev, MT_MIB_SDR12(band));
1747  	mib->tx_ampdu_cnt += cnt;
1748  
1749  	cnt = mt76_rr(dev, MT_MIB_SDR13(band));
1750  	mib->tx_stop_q_empty_cnt +=
1751  		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
1752  
1753  	cnt = mt76_rr(dev, MT_MIB_SDR14(band));
1754  	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
1755  		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
1756  		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
1757  
1758  	cnt = mt76_rr(dev, MT_MIB_SDR15(band));
1759  	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
1760  		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
1761  		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
1762  
1763  	cnt = mt76_rr(dev, MT_MIB_SDR16(band));
1764  	mib->primary_cca_busy_time +=
1765  		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
1766  
1767  	cnt = mt76_rr(dev, MT_MIB_SDR17(band));
1768  	mib->secondary_cca_busy_time +=
1769  		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
1770  
1771  	cnt = mt76_rr(dev, MT_MIB_SDR18(band));
1772  	mib->primary_energy_detect_time +=
1773  		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
1774  
1775  	cnt = mt76_rr(dev, MT_MIB_SDR19(band));
1776  	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
1777  
1778  	cnt = mt76_rr(dev, MT_MIB_SDR20(band));
1779  	mib->ofdm_mdrdy_time +=
1780  		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
1781  
1782  	cnt = mt76_rr(dev, MT_MIB_SDR21(band));
1783  	mib->green_mdrdy_time +=
1784  		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
1785  
1786  	cnt = mt76_rr(dev, MT_MIB_SDR22(band));
1787  	mib->rx_ampdu_cnt += cnt;
1788  
1789  	cnt = mt76_rr(dev, MT_MIB_SDR23(band));
1790  	mib->rx_ampdu_bytes_cnt += cnt;
1791  
1792  	cnt = mt76_rr(dev, MT_MIB_SDR24(band));
1793  	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
1794  		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
1795  		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
1796  
1797  	cnt = mt76_rr(dev, MT_MIB_SDR25(band));
1798  	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
1799  
1800  	cnt = mt76_rr(dev, MT_MIB_SDR27(band));
1801  	mib->tx_rwp_fail_cnt +=
1802  		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
1803  
1804  	cnt = mt76_rr(dev, MT_MIB_SDR28(band));
1805  	mib->tx_rwp_need_cnt +=
1806  		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
1807  
1808  	cnt = mt76_rr(dev, MT_MIB_SDR29(band));
1809  	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
1810  		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
1811  		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
1812  
1813  	cnt = mt76_rr(dev, MT_MIB_SDRVEC(band));
1814  	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
1815  		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
1816  		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
1817  
1818  	cnt = mt76_rr(dev, MT_MIB_SDR31(band));
1819  	mib->rx_ba_cnt += cnt;
1820  
1821  	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band));
1822  	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
1823  
1824  	cnt = mt76_rr(dev, MT_MIB_DR8(band));
1825  	mib->tx_mu_mpdu_cnt += cnt;
1826  
1827  	cnt = mt76_rr(dev, MT_MIB_DR9(band));
1828  	mib->tx_mu_acked_mpdu_cnt += cnt;
1829  
1830  	cnt = mt76_rr(dev, MT_MIB_DR11(band));
1831  	mib->tx_su_acked_mpdu_cnt += cnt;
1832  
1833  	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band));
1834  	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
1835  	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
1836  	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
1837  
1838  	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
1839  		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
1840  		mib->tx_amsdu[i] += cnt;
1841  		mib->tx_amsdu_cnt += cnt;
1842  	}
1843  
1844  	if (is_mt7915(&dev->mt76)) {
1845  		for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
1846  			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4)));
1847  			mib->ba_miss_cnt +=
1848  				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
1849  			mib->ack_fail_cnt +=
1850  				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
1851  
1852  			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4)));
1853  			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
1854  			mib->rts_retries_cnt +=
1855  				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
1856  
1857  			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
1858  			phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
1859  			phy->mt76->aggr_stats[aggr0++] += val >> 16;
1860  
1861  			val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i));
1862  			phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
1863  			phy->mt76->aggr_stats[aggr1++] += val >> 16;
1864  		}
1865  
1866  		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
1867  		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1868  
1869  		cnt = mt76_rr(dev, MT_MIB_SDR33(band));
1870  		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
1871  
1872  		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band));
1873  		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
1874  		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
1875  
1876  		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band));
1877  		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
1878  		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
1879  
1880  		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band));
1881  		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
1882  		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
1883  		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
1884  		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
1885  	} else {
1886  		for (i = 0; i < 2; i++) {
1887  			/* rts count */
1888  			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2)));
1889  			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
1890  			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
1891  
1892  			/* rts retry count */
1893  			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2)));
1894  			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
1895  			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
1896  
1897  			/* ba miss count */
1898  			val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2)));
1899  			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
1900  			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
1901  
1902  			/* ack fail count */
1903  			val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2)));
1904  			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
1905  			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
1906  		}
1907  
1908  		for (i = 0; i < 8; i++) {
1909  			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
1910  			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
1911  			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
1912  		}
1913  
1914  		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
1915  		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1916  		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1917  		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1918  		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1919  
1920  		cnt = mt76_rr(dev, MT_MIB_BFCR7(band));
1921  		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
1922  
1923  		cnt = mt76_rr(dev, MT_MIB_BFCR2(band));
1924  		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
1925  
1926  		cnt = mt76_rr(dev, MT_MIB_BFCR0(band));
1927  		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1928  		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1929  		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1930  		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1931  
1932  		cnt = mt76_rr(dev, MT_MIB_BFCR1(band));
1933  		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1934  		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1935  	}
1936  }
1937  
1938  static void mt7915_mac_severe_check(struct mt7915_phy *phy)
1939  {
1940  	struct mt7915_dev *dev = phy->dev;
1941  	u32 trb;
1942  
1943  	if (!phy->omac_mask)
1944  		return;
1945  
1946  	/* In rare cases, the TRB pointers might get out of sync and cause
1947  	 * RMAC to stop Rx; check the status periodically to see if the TRB
1948  	 * hardware requires minimal (L3 Rx-abort) recovery.
1949  	 */
1950  	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx));
1951  
1952  	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
1953  	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
1954  	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
1955  	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
1956  	    trb == phy->trb_ts)
1957  		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
1958  				   phy->mt76->band_idx);
1959  
1960  	phy->trb_ts = trb;
1961  }
1962  
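/* Deferred rate-control work: drain dev->sta_rc_list and push the
 * accumulated rate/bandwidth/NSS and SMPS changes to the MCU.  The
 * sta_poll_lock is dropped around each MCU call, which may sleep.
 */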
1963  void mt7915_mac_sta_rc_work(struct work_struct *work)
1964  {
1965  	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
1966  	struct ieee80211_sta *sta;
1967  	struct ieee80211_vif *vif;
1968  	struct mt7915_sta *msta;
1969  	u32 changed;
1970  	LIST_HEAD(list);
1971  
1972  	spin_lock_bh(&dev->mt76.sta_poll_lock);
1973  	list_splice_init(&dev->sta_rc_list, &list);
1974  
1975  	while (!list_empty(&list)) {
1976  		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
1977  		list_del_init(&msta->rc_list);
1978  		changed = msta->changed;
1979  		msta->changed = 0;
1980  		spin_unlock_bh(&dev->mt76.sta_poll_lock);
1981  
1982  		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
1983  		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
1984  
1985  		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
1986  			       IEEE80211_RC_NSS_CHANGED |
1987  			       IEEE80211_RC_BW_CHANGED))
1988  			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);
1989  
1990  		if (changed & IEEE80211_RC_SMPS_CHANGED)
1991  			mt7915_mcu_add_smps(dev, vif, sta);
1992  
1993  		spin_lock_bh(&dev->mt76.sta_poll_lock);
1994  	}
1995  
1996  	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1997  }
1998  
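/* Per-phy housekeeping, rescheduled every MT7915_WATCHDOG_TIME: update
 * the channel survey on every run, and on every fifth run refresh the
 * MIB stats and run the TRB pointer sanity check.
 */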
1999  void mt7915_mac_work(struct work_struct *work)
2000  {
2001  	struct mt7915_phy *phy;
2002  	struct mt76_phy *mphy;
2003  
2004  	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
2005  					       mac_work.work);
2006  	phy = mphy->priv;
2007  
2008  	mutex_lock(&mphy->dev->mutex);
2009  
2010  	mt76_update_survey(mphy);
2011  	if (++mphy->mac_work_count == 5) {
2012  		mphy->mac_work_count = 0;
2013  
2014  		mt7915_mac_update_stats(phy);
2015  		mt7915_mac_severe_check(phy);
2016  
2017  		if (phy->dev->muru_debug)
2018  			mt7915_mcu_muru_debug_get(phy);
2019  	}
2020  
2021  	mutex_unlock(&mphy->dev->mutex);
2022  
2023  	mt76_tx_status_check(mphy->dev, false);
2024  
2025  	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
2026  				     MT7915_WATCHDOG_TIME);
2027  }
2028  
2029  static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
2030  {
2031  	struct mt7915_dev *dev = phy->dev;
2032  
2033  	if (phy->rdd_state & BIT(0))
2034  		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
2035  					MT_RX_SEL0, 0);
2036  	if (phy->rdd_state & BIT(1))
2037  		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
2038  					MT_RX_SEL0, 0);
2039  }
2040  
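/* Start the radar detector on the given chain, mapping the regulatory
 * region to the firmware's RDD encoding (0 = ETSI, 1 = FCC/default,
 * 2 = JP).  On mt7915 the WF antenna selection also depends on whether
 * DBDC is in use.
 */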
2041  static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
2042  {
2043  	int err, region;
2044  
2045  	switch (dev->mt76.region) {
2046  	case NL80211_DFS_ETSI:
2047  		region = 0;
2048  		break;
2049  	case NL80211_DFS_JP:
2050  		region = 2;
2051  		break;
2052  	case NL80211_DFS_FCC:
2053  	default:
2054  		region = 1;
2055  		break;
2056  	}
2057  
2058  	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
2059  				      MT_RX_SEL0, region);
2060  	if (err < 0)
2061  		return err;
2062  
2063  	if (is_mt7915(&dev->mt76)) {
2064  		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
2065  					      0, dev->dbdc_support ? 2 : 0);
2066  		if (err < 0)
2067  			return err;
2068  	}
2069  
2070  	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
2071  				       MT_RX_SEL0, 1);
2072  }
2073  
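/* Kick off CAC and start the radar detector for the band; on mt7915,
 * 160 MHz and 80+80 MHz channels require a second detector chain.
 */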
2074  static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
2075  {
2076  	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2077  	struct mt7915_dev *dev = phy->dev;
2078  	int err;
2079  
2080  	/* start CAC */
2081  	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
2082  				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2083  	if (err < 0)
2084  		return err;
2085  
2086  	err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
2087  	if (err < 0)
2088  		return err;
2089  
2090  	phy->rdd_state |= BIT(phy->mt76->band_idx);
2091  
2092  	if (!is_mt7915(&dev->mt76))
2093  		return 0;
2094  
2095  	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
2096  	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
2097  		err = mt7915_dfs_start_rdd(dev, 1);
2098  		if (err < 0)
2099  			return err;
2100  
2101  		phy->rdd_state |= BIT(1);
2102  	}
2103  
2104  	return 0;
2105  }
2106  
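/* Program the region-specific pulse thresholds and radar patterns into
 * the firmware; the FCC region additionally sets the FCC-5 LPN value.
 */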
2107  static int
2108  mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
2109  {
2110  	const struct mt7915_dfs_radar_spec *radar_specs;
2111  	struct mt7915_dev *dev = phy->dev;
2112  	int err, i;
2113  
2114  	switch (dev->mt76.region) {
2115  	case NL80211_DFS_FCC:
2116  		radar_specs = &fcc_radar_specs;
2117  		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
2118  		if (err < 0)
2119  			return err;
2120  		break;
2121  	case NL80211_DFS_ETSI:
2122  		radar_specs = &etsi_radar_specs;
2123  		break;
2124  	case NL80211_DFS_JP:
2125  		radar_specs = &jp_radar_specs;
2126  		break;
2127  	default:
2128  		return -EINVAL;
2129  	}
2130  
2131  	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
2132  		err = mt7915_mcu_set_radar_th(dev, i,
2133  					      &radar_specs->radar_pattern[i]);
2134  		if (err < 0)
2135  			return err;
2136  	}
2137  
2138  	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
2139  }
2140  
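/* DFS state machine: move between disabled, CAC and active according to
 * the current channel.  Entering a DFS channel programs the radar specs
 * and starts CAC; once CAC completes, RDD_CAC_END switches the band to
 * active; leaving DFS stops the detector and resumes normal Rx.
 */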
2141  int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
2142  {
2143  	struct mt7915_dev *dev = phy->dev;
2144  	enum mt76_dfs_state dfs_state, prev_state;
2145  	int err;
2146  
2147  	prev_state = phy->mt76->dfs_state;
2148  	dfs_state = mt76_phy_dfs_state(phy->mt76);
2149  
2150  	if (prev_state == dfs_state)
2151  		return 0;
2152  
2153  	if (prev_state == MT_DFS_STATE_UNKNOWN)
2154  		mt7915_dfs_stop_radar_detector(phy);
2155  
2156  	if (dfs_state == MT_DFS_STATE_DISABLED)
2157  		goto stop;
2158  
2159  	if (prev_state <= MT_DFS_STATE_DISABLED) {
2160  		err = mt7915_dfs_init_radar_specs(phy);
2161  		if (err < 0)
2162  			return err;
2163  
2164  		err = mt7915_dfs_start_radar_detector(phy);
2165  		if (err < 0)
2166  			return err;
2167  
2168  		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
2169  	}
2170  
2171  	if (dfs_state == MT_DFS_STATE_CAC)
2172  		return 0;
2173  
2174  	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
2175  				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2176  	if (err < 0) {
2177  		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
2178  		return err;
2179  	}
2180  
2181  	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
2182  	return 0;
2183  
2184  stop:
2185  	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
2186  				      phy->mt76->band_idx, MT_RX_SEL0, 0);
2187  	if (err < 0)
2188  		return err;
2189  
2190  	if (is_mt7915(&dev->mt76)) {
2191  		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
2192  					      phy->mt76->band_idx, 0,
2193  					      dev->dbdc_support ? 2 : 0);
2194  		if (err < 0)
2195  			return err;
2196  	}
2197  
2198  	mt7915_dfs_stop_radar_detector(phy);
2199  	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
2200  
2201  	return 0;
2202  }
2203  
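/* TWT wake durations are negotiated in units of 256us (enforced in
 * mt7915_mac_check_twt_req()), so a left shift by 8 converts them to
 * microseconds/TSF units, e.g. min_twt_dur = 10 -> 2560us.
 */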
2204  static int
2205  mt7915_mac_twt_duration_align(int duration)
2206  {
2207  	return duration << 8;
2208  }
2209  
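/* Insert a flow into the TSF-ordered TWT schedule list, using the first
 * gap between already-scheduled flows that is large enough for the new
 * flow's wake duration.  Returns the TSF offset where the flow starts.
 */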
2210  static u64
2211  mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
2212  			      struct mt7915_twt_flow *flow)
2213  {
2214  	struct mt7915_twt_flow *iter, *iter_next;
2215  	u32 duration = flow->duration << 8;
2216  	u64 start_tsf;
2217  
2218  	iter = list_first_entry_or_null(&dev->twt_list,
2219  					struct mt7915_twt_flow, list);
2220  	if (!iter || !iter->sched || iter->start_tsf > duration) {
2221  		/* add flow as first entry in the list */
2222  		list_add(&flow->list, &dev->twt_list);
2223  		return 0;
2224  	}
2225  
2226  	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
2227  		start_tsf = iter->start_tsf +
2228  			    mt7915_mac_twt_duration_align(iter->duration);
2229  		if (list_is_last(&iter->list, &dev->twt_list))
2230  			break;
2231  
2232  		if (!iter_next->sched ||
2233  		    iter_next->start_tsf > start_tsf + duration) {
2234  			list_add(&flow->list, &iter->list);
2235  			goto out;
2236  		}
2237  	}
2238  
2239  	/* add flow as last entry in the list */
2240  	list_add_tail(&flow->list, &dev->twt_list);
2241  out:
2242  	return start_tsf;
2243  }
2244  
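/* Validate a TWT setup request: only individual, implicit agreements
 * using 256us wake-duration units are accepted, and the wake interval
 * (mantissa << exp) must be at least as long as the wake duration.
 */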
2245  static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
2246  {
2247  	struct ieee80211_twt_params *twt_agrt;
2248  	u64 interval, duration;
2249  	u16 mantissa;
2250  	u8 exp;
2251  
2252  	/* only individual agreement supported */
2253  	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
2254  		return -EOPNOTSUPP;
2255  
2256  	/* only 256us unit supported */
2257  	/* only 256us wake-duration units supported */
2258  		return -EOPNOTSUPP;
2259  
2260  	twt_agrt = (struct ieee80211_twt_params *)twt->params;
2261  
2262  	/* explicit agreement not supported */
2263  	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
2264  		return -EOPNOTSUPP;
2265  
2266  	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
2267  			le16_to_cpu(twt_agrt->req_type));
2268  	mantissa = le16_to_cpu(twt_agrt->mantissa);
2269  	duration = twt_agrt->min_twt_dur << 8;
2270  
2271  	interval = (u64)mantissa << exp;
2272  	if (interval < duration)
2273  		return -EOPNOTSUPP;
2274  
2275  	return 0;
2276  }
2277  
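/* Return true if the station already has a flow with identical TWT
 * parameters, so duplicate agreements are not installed.
 */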
2278  static bool
2279  mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
2280  			   struct ieee80211_twt_params *twt_agrt)
2281  {
2282  	u16 type = le16_to_cpu(twt_agrt->req_type);
2283  	u8 exp;
2284  	int i;
2285  
2286  	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
2287  	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
2288  		struct mt7915_twt_flow *f;
2289  
2290  		if (!(msta->twt.flowid_mask & BIT(i)))
2291  			continue;
2292  
2293  		f = &msta->twt.flow[i];
2294  		if (f->duration == twt_agrt->min_twt_dur &&
2295  		    f->mantissa == twt_agrt->mantissa &&
2296  		    f->exp == exp &&
2297  		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
2298  		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
2299  		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
2300  			return true;
2301  	}
2302  
2303  	return false;
2304  }
2305  
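/* Handle a TWT setup request from a station: validate the request,
 * allocate a flow id and agreement table slot, compute the first target
 * wake time for REQUEST/SUGGEST commands and install the agreement in
 * the MCU.  The resulting setup command (accept/dictate/reject) is
 * written back into the response's request type field.
 */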
2306  void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
2307  			      struct ieee80211_sta *sta,
2308  			      struct ieee80211_twt_setup *twt)
2309  {
2310  	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
2311  	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
2312  	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
2313  	u16 req_type = le16_to_cpu(twt_agrt->req_type);
2314  	enum ieee80211_twt_setup_cmd sta_setup_cmd;
2315  	struct mt7915_dev *dev = mt7915_hw_dev(hw);
2316  	struct mt7915_twt_flow *flow;
2317  	int flowid, table_id;
2318  	u8 exp;
2319  
2320  	if (mt7915_mac_check_twt_req(twt))
2321  		goto out;
2322  
2323  	mutex_lock(&dev->mt76.mutex);
2324  
2325  	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
2326  		goto unlock;
2327  
2328  	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
2329  		goto unlock;
2330  
2331  	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
2332  		setup_cmd = TWT_SETUP_CMD_DICTATE;
2333  		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
2334  		goto unlock;
2335  	}
2336  
2337  	flowid = ffs(~msta->twt.flowid_mask) - 1;
2338  	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
2339  	twt_agrt->req_type |= le16_encode_bits(flowid,
2340  					       IEEE80211_TWT_REQTYPE_FLOWID);
2341  
2342  	table_id = ffs(~dev->twt.table_mask) - 1;
2343  	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
2344  	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
2345  
2346  	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
2347  		goto unlock;
2348  
2349  	flow = &msta->twt.flow[flowid];
2350  	memset(flow, 0, sizeof(*flow));
2351  	INIT_LIST_HEAD(&flow->list);
2352  	flow->wcid = msta->wcid.idx;
2353  	flow->table_id = table_id;
2354  	flow->id = flowid;
2355  	flow->duration = twt_agrt->min_twt_dur;
2356  	flow->mantissa = twt_agrt->mantissa;
2357  	flow->exp = exp;
2358  	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
2359  	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
2360  	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
2361  
2362  	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
2363  	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
2364  		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
2365  		u64 flow_tsf, curr_tsf;
2366  		u32 rem;
2367  
2368  		flow->sched = true;
2369  		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
2370  		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
2371  		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
2372  		flow_tsf = curr_tsf + interval - rem;
2373  		twt_agrt->twt = cpu_to_le64(flow_tsf);
2374  	} else {
2375  		list_add_tail(&flow->list, &dev->twt_list);
2376  	}
2377  	flow->tsf = le64_to_cpu(twt_agrt->twt);
2378  
2379  	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
2380  		goto unlock;
2381  
2382  	setup_cmd = TWT_SETUP_CMD_ACCEPT;
2383  	dev->twt.table_mask |= BIT(table_id);
2384  	msta->twt.flowid_mask |= BIT(flowid);
2385  	dev->twt.n_agrt++;
2386  
2387  unlock:
2388  	mutex_unlock(&dev->mt76.mutex);
2389  out:
2390  	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
2391  	twt_agrt->req_type |=
2392  		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
2393  	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
2394  		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
2395  }
2396  
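/* Tear down a single TWT flow: the agreement is deleted in the MCU
 * first, and the flow id and table slot are only released on success so
 * that a failed MCU command leaves the bookkeeping unchanged.
 */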
2397  void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
2398  				  struct mt7915_sta *msta,
2399  				  u8 flowid)
2400  {
2401  	struct mt7915_twt_flow *flow;
2402  
2403  	lockdep_assert_held(&dev->mt76.mutex);
2404  
2405  	if (flowid >= ARRAY_SIZE(msta->twt.flow))
2406  		return;
2407  
2408  	if (!(msta->twt.flowid_mask & BIT(flowid)))
2409  		return;
2410  
2411  	flow = &msta->twt.flow[flowid];
2412  	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
2413  				       MCU_TWT_AGRT_DELETE))
2414  		return;
2415  
2416  	list_del_init(&flow->list);
2417  	msta->twt.flowid_mask &= ~BIT(flowid);
2418  	dev->twt.table_mask &= ~BIT(flow->table_id);
2419  	dev->twt.n_agrt--;
2420  }
2421