1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2019 MediaTek Inc.
3  *
4  * Author: Ryder Lee <ryder.lee@mediatek.com>
5  *         Roy Luo <royluo@google.com>
6  *         Felix Fietkau <nbd@nbd.name>
7  *         Lorenzo Bianconi <lorenzo@kernel.org>
8  */
9 
10 #include <linux/etherdevice.h>
11 #include <linux/timekeeping.h>
12 #include "mt7615.h"
13 #include "../dma.h"
14 #include "mac.h"
15 
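/* Convert a per-chain RCPI field from the RX vector into a signed dBm RSSI */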
16 static inline s8 to_rssi(u32 field, u32 rxv)
17 {
18 	return (FIELD_GET(field, rxv) - 220) / 2;
19 }
20 
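/*
 * Look up the mt76_wcid for the WLAN index found in the RX descriptor.
 * Unicast frames use the station entry directly; group-addressed frames
 * from a known station are remapped to the per-vif wcid.
 */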
21 static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
22 					    u8 idx, bool unicast)
23 {
24 	struct mt7615_sta *sta;
25 	struct mt76_wcid *wcid;
26 
27 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
28 		return NULL;
29 
30 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
31 	if (unicast || !wcid)
32 		return wcid;
33 
34 	if (!wcid->sta)
35 		return NULL;
36 
37 	sta = container_of(wcid, struct mt7615_sta, wcid);
38 	if (!sta->vif)
39 		return NULL;
40 
41 	return &sta->vif->sta.wcid;
42 }
43 
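/*
 * Parse the RX descriptor (RXD) and any optional descriptor groups in
 * front of the 802.11 frame, fill in the mac80211 RX status (band, rate,
 * bandwidth, per-chain signal and decryption flags) and strip the
 * descriptor plus header padding from the skb.
 */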
44 int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
45 {
46 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
47 	struct ieee80211_supported_band *sband;
48 	struct ieee80211_hdr *hdr;
49 	__le32 *rxd = (__le32 *)skb->data;
50 	u32 rxd0 = le32_to_cpu(rxd[0]);
51 	u32 rxd1 = le32_to_cpu(rxd[1]);
52 	u32 rxd2 = le32_to_cpu(rxd[2]);
53 	bool unicast, remove_pad, insert_ccmp_hdr = false;
54 	int i, idx;
55 
56 	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
57 		return -EINVAL;
58 
59 	memset(status, 0, sizeof(*status));
60 
61 	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
62 	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
63 	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
64 
65 	/* TODO: properly support DBDC */
66 	status->freq = dev->mt76.chandef.chan->center_freq;
67 	status->band = dev->mt76.chandef.chan->band;
68 	if (status->band == NL80211_BAND_5GHZ)
69 		sband = &dev->mt76.sband_5g.sband;
70 	else
71 		sband = &dev->mt76.sband_2g.sband;
72 
73 	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
74 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
75 
76 	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
77 		status->flag |= RX_FLAG_MMIC_ERROR;
78 
79 	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
80 	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
81 		status->flag |= RX_FLAG_DECRYPTED;
82 		status->flag |= RX_FLAG_IV_STRIPPED;
83 		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
84 	}
85 
86 	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
87 
88 	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
89 		return -EINVAL;
90 
91 	if (!sband->channels)
92 		return -EINVAL;
93 
94 	rxd += 4;
95 	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
96 		rxd += 4;
97 		if ((u8 *)rxd - skb->data >= skb->len)
98 			return -EINVAL;
99 	}
100 
101 	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
102 		u8 *data = (u8 *)rxd;
103 
104 		if (status->flag & RX_FLAG_DECRYPTED) {
105 			status->iv[0] = data[5];
106 			status->iv[1] = data[4];
107 			status->iv[2] = data[3];
108 			status->iv[3] = data[2];
109 			status->iv[4] = data[1];
110 			status->iv[5] = data[0];
111 
112 			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
113 		}
114 		rxd += 4;
115 		if ((u8 *)rxd - skb->data >= skb->len)
116 			return -EINVAL;
117 	}
118 
119 	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
120 		rxd += 2;
121 		if ((u8 *)rxd - skb->data >= skb->len)
122 			return -EINVAL;
123 	}
124 
125 	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
126 		u32 rxdg0 = le32_to_cpu(rxd[0]);
127 		u32 rxdg1 = le32_to_cpu(rxd[1]);
128 		u32 rxdg3 = le32_to_cpu(rxd[3]);
129 		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
130 		bool cck = false;
131 
132 		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
133 		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
134 		case MT_PHY_TYPE_CCK:
135 			cck = true;
136 			/* fall through */
137 		case MT_PHY_TYPE_OFDM:
138 			i = mt76_get_rate(&dev->mt76, sband, i, cck);
139 			break;
140 		case MT_PHY_TYPE_HT_GF:
141 		case MT_PHY_TYPE_HT:
142 			status->encoding = RX_ENC_HT;
143 			if (i > 31)
144 				return -EINVAL;
145 			break;
146 		case MT_PHY_TYPE_VHT:
147 			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
148 			status->encoding = RX_ENC_VHT;
149 			break;
150 		default:
151 			return -EINVAL;
152 		}
153 		status->rate_idx = i;
154 
155 		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
156 		case MT_PHY_BW_20:
157 			break;
158 		case MT_PHY_BW_40:
159 			status->bw = RATE_INFO_BW_40;
160 			break;
161 		case MT_PHY_BW_80:
162 			status->bw = RATE_INFO_BW_80;
163 			break;
164 		case MT_PHY_BW_160:
165 			status->bw = RATE_INFO_BW_160;
166 			break;
167 		default:
168 			return -EINVAL;
169 		}
170 
171 		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
172 			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
173 		if (rxdg0 & MT_RXV1_HT_AD_CODE)
174 			status->enc_flags |= RX_ENC_FLAG_LDPC;
175 
176 		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
177 
178 		status->chains = dev->mt76.antenna_mask;
179 		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
180 		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
181 		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
182 		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
183 		status->signal = status->chain_signal[0];
184 
185 		for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
186 			if (!(status->chains & BIT(i)))
187 				continue;
188 
189 			status->signal = max(status->signal,
190 					     status->chain_signal[i]);
191 		}
192 
193 		rxd += 6;
194 		if ((u8 *)rxd - skb->data >= skb->len)
195 			return -EINVAL;
196 	}
197 
198 	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
199 
200 	if (insert_ccmp_hdr) {
201 		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
202 
203 		mt76_insert_ccmp_hdr(skb, key_id);
204 	}
205 
206 	hdr = (struct ieee80211_hdr *)skb->data;
207 	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
208 		return 0;
209 
210 	status->aggr = unicast &&
211 		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
212 	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
213 	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
214 
215 	return 0;
216 }
217 
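/*
 * No host-side action is taken on power-save transitions; this callback
 * only satisfies the mt76 driver interface.
 */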
218 void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
219 {
220 }
221 
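/*
 * Complete a transmitted frame.  Entries without a TXWI only carry an skb
 * to be freed; on the error path, where the queue entry still holds the
 * DMA dummy pointer, the real skb is recovered from the token table
 * before being reported to mac80211.
 */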
222 void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
223 			    struct mt76_queue_entry *e)
224 {
225 	if (!e->txwi) {
226 		dev_kfree_skb_any(e->skb);
227 		return;
228 	}
229 
230 	/* error path */
231 	if (e->skb == DMA_DUMMY_DATA) {
232 		struct mt76_txwi_cache *t;
233 		struct mt7615_dev *dev;
234 		struct mt7615_txp *txp;
235 
236 		dev = container_of(mdev, struct mt7615_dev, mt76);
237 		txp = mt7615_txwi_to_txp(mdev, e->txwi);
238 
239 		spin_lock_bh(&dev->token_lock);
240 		t = idr_remove(&dev->token, le16_to_cpu(txp->token));
241 		spin_unlock_bh(&dev->token_lock);
242 		e->skb = t ? t->skb : NULL;
243 	}
244 
245 	if (e->skb)
246 		mt76_tx_complete_skb(mdev, e->skb);
247 }
248 
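/*
 * Translate a mac80211 TX rate (legacy CCK/OFDM, HT or VHT, plus
 * bandwidth and STBC) into the hardware rate encoding shared by the
 * TXWI and the WTBL rate table, returning the bandwidth index in *bw.
 */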
249 static u16
250 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
251 		       const struct ieee80211_tx_rate *rate,
252 		       bool stbc, u8 *bw)
253 {
254 	u8 phy, nss, rate_idx;
255 	u16 rateval = 0;
256 
257 	*bw = 0;
258 
259 	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
260 		rate_idx = ieee80211_rate_get_vht_mcs(rate);
261 		nss = ieee80211_rate_get_vht_nss(rate);
262 		phy = MT_PHY_TYPE_VHT;
263 		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
264 			*bw = 1;
265 		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
266 			*bw = 2;
267 		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
268 			*bw = 3;
269 	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
270 		rate_idx = rate->idx;
271 		nss = 1 + (rate->idx >> 3);
272 		phy = MT_PHY_TYPE_HT;
273 		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
274 			phy = MT_PHY_TYPE_HT_GF;
275 		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
276 			*bw = 1;
277 	} else {
278 		const struct ieee80211_rate *r;
279 		int band = dev->mt76.chandef.chan->band;
280 		u16 val;
281 
282 		nss = 1;
283 		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
284 		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
285 			val = r->hw_value_short;
286 		else
287 			val = r->hw_value;
288 
289 		phy = val >> 8;
290 		rate_idx = val & 0xff;
291 	}
292 
293 	if (stbc && nss == 1) {
294 		nss++;
295 		rateval |= MT_TX_RATE_STBC;
296 	}
297 
298 	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
299 		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
300 		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
301 
302 	return rateval;
303 }
304 
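/*
 * Build the TX descriptor (TXWI) for a frame: queue and port selection,
 * header info, security flags, an optional fixed-rate override for rate
 * probing, sequence number handling and the remaining TX count.
 */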
305 int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
306 			  struct sk_buff *skb, struct mt76_wcid *wcid,
307 			  struct ieee80211_sta *sta, int pid,
308 			  struct ieee80211_key_conf *key)
309 {
310 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
311 	struct ieee80211_tx_rate *rate = &info->control.rates[0];
312 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
313 	bool multicast = is_multicast_ether_addr(hdr->addr1);
314 	struct ieee80211_vif *vif = info->control.vif;
315 	int tx_count = 8;
316 	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
317 	__le16 fc = hdr->frame_control;
318 	u16 seqno = 0;
319 	u32 val;
320 
321 	if (vif) {
322 		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
323 
324 		omac_idx = mvif->omac_idx;
325 		wmm_idx = mvif->wmm_idx;
326 	}
327 
328 	if (sta) {
329 		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
330 
331 		tx_count = msta->rate_count;
332 	}
333 
334 	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
335 	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
336 
337 	if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
338 		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
339 			skb_get_queue_mapping(skb);
340 		p_fmt = MT_TX_TYPE_CT;
341 	} else if (ieee80211_is_beacon(fc)) {
342 		q_idx = MT_LMAC_BCN0;
343 		p_fmt = MT_TX_TYPE_FW;
344 	} else {
345 		q_idx = MT_LMAC_ALTX0;
346 		p_fmt = MT_TX_TYPE_CT;
347 	}
348 
349 	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
350 	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
351 	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
352 	txwi[0] = cpu_to_le32(val);
353 
354 	val = MT_TXD1_LONG_FORMAT |
355 	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
356 	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
357 	      FIELD_PREP(MT_TXD1_HDR_INFO,
358 			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
359 	      FIELD_PREP(MT_TXD1_TID,
360 			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
361 	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
362 	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
363 	txwi[1] = cpu_to_le32(val);
364 
365 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
366 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
367 	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
368 	if (key) {
369 		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
370 		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
371 			val |= MT_TXD2_BIP;
372 			txwi[3] = 0;
373 		} else {
374 			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
375 		}
376 	} else {
377 		txwi[3] = 0;
378 	}
379 	txwi[2] = cpu_to_le32(val);
380 
381 	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
382 		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
383 
384 	txwi[4] = 0;
385 	txwi[6] = 0;
386 
387 	if (rate->idx >= 0 && rate->count &&
388 	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
389 		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
390 		u8 bw;
391 		u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
392 
393 		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
394 
395 		val = MT_TXD6_FIXED_BW |
396 		      FIELD_PREP(MT_TXD6_BW, bw) |
397 		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
398 		txwi[6] |= cpu_to_le32(val);
399 
400 		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
401 			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
402 
403 		if (info->flags & IEEE80211_TX_CTL_LDPC)
404 			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
405 
406 		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
407 				     IEEE80211_TX_RC_VHT_MCS)))
408 			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
409 
410 		tx_count = rate->count;
411 	}
412 
413 	if (!ieee80211_is_beacon(fc)) {
414 		val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
415 		      FIELD_PREP(MT_TXD5_PID, pid);
416 		txwi[5] = cpu_to_le32(val);
417 	} else {
418 		txwi[5] = 0;
419 		/* use maximum tx count for beacons */
420 		tx_count = 0x1f;
421 	}
422 
423 	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
424 	if (ieee80211_is_data_qos(hdr->frame_control)) {
425 		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
426 		val |= MT_TXD3_SN_VALID;
427 	} else if (ieee80211_is_back_req(hdr->frame_control)) {
428 		struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
429 
430 		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
431 		val |= MT_TXD3_SN_VALID;
432 	}
433 	val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
434 
435 	txwi[3] |= cpu_to_le32(val);
436 
437 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
438 		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
439 
	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] = cpu_to_le32(val);
442 
443 	return 0;
444 }
445 
446 void mt7615_txp_skb_unmap(struct mt76_dev *dev,
447 			  struct mt76_txwi_cache *t)
448 {
449 	struct mt7615_txp *txp;
450 	int i;
451 
452 	txp = mt7615_txwi_to_txp(dev, t);
453 	for (i = 1; i < txp->nbuf; i++)
454 		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
455 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
456 }
457 
458 static u32 mt7615_mac_wtbl_addr(int wcid)
459 {
460 	return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
461 }
462 
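/*
 * Program the per-station rate table in the WTBL.  The rate set is
 * normalized (consistent GI flags, no duplicate entries), converted to
 * the hardware encoding and written through the indirect WTBL rate
 * update registers.  Two rate sets are kept and toggled via bit 0 of
 * the stored TSF snapshot so that TX status reports can be matched
 * against the set that was active when the frame was queued.
 */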
463 void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
464 			  struct ieee80211_tx_rate *probe_rate,
465 			  struct ieee80211_tx_rate *rates)
466 {
467 	struct ieee80211_tx_rate *ref;
468 	int wcid = sta->wcid.idx;
469 	u32 addr = mt7615_mac_wtbl_addr(wcid);
470 	bool stbc = false;
471 	int n_rates = sta->n_rates;
472 	u8 bw, bw_prev, bw_idx = 0;
473 	u16 val[4];
474 	u16 probe_val;
475 	u32 w5, w27;
476 	bool rateset;
477 	int i, k;
478 
479 	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
480 		return;
481 
482 	for (i = n_rates; i < 4; i++)
483 		rates[i] = rates[n_rates - 1];
484 
485 	rateset = !(sta->rate_set_tsf & BIT(0));
486 	memcpy(sta->rateset[rateset].rates, rates,
487 	       sizeof(sta->rateset[rateset].rates));
488 	if (probe_rate) {
489 		sta->rateset[rateset].probe_rate = *probe_rate;
490 		ref = &sta->rateset[rateset].probe_rate;
491 	} else {
492 		sta->rateset[rateset].probe_rate.idx = -1;
493 		ref = &sta->rateset[rateset].rates[0];
494 	}
495 
496 	rates = sta->rateset[rateset].rates;
497 	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
498 		/*
499 		 * We don't support switching between short and long GI
500 		 * within the rate set. For accurate tx status reporting, we
501 		 * need to make sure that flags match.
502 		 * For improved performance, avoid duplicate entries by
503 		 * decrementing the MCS index if necessary
504 		 */
505 		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
506 			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
507 
508 		for (k = 0; k < i; k++) {
509 			if (rates[i].idx != rates[k].idx)
510 				continue;
511 			if ((rates[i].flags ^ rates[k].flags) &
512 			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
513 			     IEEE80211_TX_RC_80_MHZ_WIDTH |
514 			     IEEE80211_TX_RC_160_MHZ_WIDTH))
515 				continue;
516 
517 			if (!rates[i].idx)
518 				continue;
519 
520 			rates[i].idx--;
521 		}
522 	}
523 
524 	val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
525 	bw_prev = bw;
526 
527 	if (probe_rate) {
528 		probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
529 		if (bw)
530 			bw_idx = 1;
531 		else
532 			bw_prev = 0;
533 	} else {
534 		probe_val = val[0];
535 	}
536 
537 	val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
538 	if (bw_prev) {
539 		bw_idx = 3;
540 		bw_prev = bw;
541 	}
542 
543 	val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
544 	if (bw_prev) {
545 		bw_idx = 5;
546 		bw_prev = bw;
547 	}
548 
549 	val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
550 	if (bw_prev)
551 		bw_idx = 7;
552 
553 	w27 = mt76_rr(dev, addr + 27 * 4);
554 	w27 &= ~MT_WTBL_W27_CC_BW_SEL;
555 	w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw);
556 
557 	w5 = mt76_rr(dev, addr + 5 * 4);
558 	w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
559 		MT_WTBL_W5_MPDU_OK_COUNT |
560 		MT_WTBL_W5_MPDU_FAIL_COUNT |
561 		MT_WTBL_W5_RATE_IDX);
562 	w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) |
563 	      FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7);
564 
565 	mt76_wr(dev, MT_WTBL_RIUCR0, w5);
566 
567 	mt76_wr(dev, MT_WTBL_RIUCR1,
568 		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
569 		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
570 		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
571 
572 	mt76_wr(dev, MT_WTBL_RIUCR2,
573 		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
574 		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
575 		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
576 		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
577 
578 	mt76_wr(dev, MT_WTBL_RIUCR3,
579 		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
580 		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
581 		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
582 
583 	mt76_wr(dev, MT_WTBL_UPDATE,
584 		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
585 		MT_WTBL_UPDATE_RATE_UPDATE |
586 		MT_WTBL_UPDATE_TX_COUNT_CLEAR);
587 
588 	mt76_wr(dev, addr + 27 * 4, w27);
589 
590 	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
591 	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
592 
593 	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
594 		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
595 
596 	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
597 	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
598 }
599 
600 static enum mt7615_cipher_type
601 mt7615_mac_get_cipher(int cipher)
602 {
603 	switch (cipher) {
604 	case WLAN_CIPHER_SUITE_WEP40:
605 		return MT_CIPHER_WEP40;
606 	case WLAN_CIPHER_SUITE_WEP104:
607 		return MT_CIPHER_WEP104;
608 	case WLAN_CIPHER_SUITE_TKIP:
609 		return MT_CIPHER_TKIP;
610 	case WLAN_CIPHER_SUITE_AES_CMAC:
611 		return MT_CIPHER_BIP_CMAC_128;
612 	case WLAN_CIPHER_SUITE_CCMP:
613 		return MT_CIPHER_AES_CCMP;
614 	case WLAN_CIPHER_SUITE_CCMP_256:
615 		return MT_CIPHER_CCMP_256;
616 	case WLAN_CIPHER_SUITE_GCMP:
617 		return MT_CIPHER_GCMP;
618 	case WLAN_CIPHER_SUITE_GCMP_256:
619 		return MT_CIPHER_GCMP_256;
620 	case WLAN_CIPHER_SUITE_SMS4:
621 		return MT_CIPHER_WAPI;
622 	default:
623 		return MT_CIPHER_NONE;
624 	}
625 }
626 
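/*
 * Write key material into the WTBL key slots.  A BIP (IGTK) key is kept
 * in the upper half of the 32-byte key area so that it can coexist with
 * a pairwise key; TKIP RX/TX MIC keys are stored swapped.
 */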
627 static int
628 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
629 			   struct ieee80211_key_conf *key,
630 			   enum mt7615_cipher_type cipher,
631 			   enum set_key_cmd cmd)
632 {
633 	u32 addr = mt7615_mac_wtbl_addr(wcid->idx) + 30 * 4;
634 	u8 data[32] = {};
635 
636 	if (key->keylen > sizeof(data))
637 		return -EINVAL;
638 
639 	mt76_rr_copy(dev, addr, data, sizeof(data));
640 	if (cmd == SET_KEY) {
641 		if (cipher == MT_CIPHER_TKIP) {
642 			/* Rx/Tx MIC keys are swapped */
643 			memcpy(data + 16, key->key + 24, 8);
644 			memcpy(data + 24, key->key + 16, 8);
645 		}
646 		if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
647 			memmove(data + 16, data, 16);
648 		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
649 			memcpy(data, key->key, key->keylen);
650 		else if (cipher == MT_CIPHER_BIP_CMAC_128)
651 			memcpy(data + 16, key->key, 16);
652 	} else {
653 		if (wcid->cipher & ~BIT(cipher)) {
654 			if (cipher != MT_CIPHER_BIP_CMAC_128)
655 				memmove(data, data + 16, 16);
656 			memset(data + 16, 0, 16);
657 		} else {
658 			memset(data, 0, sizeof(data));
659 		}
660 	}
661 	mt76_wr_copy(dev, addr, data, sizeof(data));
662 
663 	return 0;
664 }
665 
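/*
 * Update the RX key state (key index and key-valid bits) in WTBL words
 * 0 and 1 through the indirect WTBL update interface.
 */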
666 static int
667 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
668 			  enum mt7615_cipher_type cipher, int keyidx,
669 			  enum set_key_cmd cmd)
670 {
671 	u32 addr = mt7615_mac_wtbl_addr(wcid->idx), w0, w1;
672 
673 	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
674 		return -ETIMEDOUT;
675 
676 	w0 = mt76_rr(dev, addr);
677 	w1 = mt76_rr(dev, addr + 4);
678 	if (cmd == SET_KEY) {
679 		w0 |= MT_WTBL_W0_RX_KEY_VALID |
680 		      FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
681 				 cipher == MT_CIPHER_BIP_CMAC_128);
682 		if (cipher != MT_CIPHER_BIP_CMAC_128 ||
683 		    !wcid->cipher)
684 			w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
685 	}  else {
686 		if (!(wcid->cipher & ~BIT(cipher)))
687 			w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
688 				MT_WTBL_W0_KEY_IDX);
689 		if (cipher == MT_CIPHER_BIP_CMAC_128)
690 			w0 &= ~MT_WTBL_W0_RX_IK_VALID;
691 	}
692 	mt76_wr(dev, MT_WTBL_RICR0, w0);
693 	mt76_wr(dev, MT_WTBL_RICR1, w1);
694 
695 	mt76_wr(dev, MT_WTBL_UPDATE,
696 		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid->idx) |
697 		MT_WTBL_UPDATE_RXINFO_UPDATE);
698 
699 	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
700 		return -ETIMEDOUT;
701 
702 	return 0;
703 }
704 
705 static void
706 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
707 			      enum mt7615_cipher_type cipher,
708 			      enum set_key_cmd cmd)
709 {
710 	u32 addr = mt7615_mac_wtbl_addr(wcid->idx);
711 
712 	if (cmd == SET_KEY) {
713 		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
714 			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
715 				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
716 	} else {
717 		if (cipher != MT_CIPHER_BIP_CMAC_128 &&
718 		    wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
719 			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
720 				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
721 					    MT_CIPHER_BIP_CMAC_128));
722 		else if (!(wcid->cipher & ~BIT(cipher)))
723 			mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
724 	}
725 }
726 
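/*
 * Install or remove a hardware key for a station: update the cipher
 * field, the key material and the RX key state in the WTBL, and track
 * the installed ciphers in wcid->cipher.
 */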
727 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
728 			    struct mt76_wcid *wcid,
729 			    struct ieee80211_key_conf *key,
730 			    enum set_key_cmd cmd)
731 {
732 	enum mt7615_cipher_type cipher;
733 	int err;
734 
735 	cipher = mt7615_mac_get_cipher(key->cipher);
736 	if (cipher == MT_CIPHER_NONE)
737 		return -EOPNOTSUPP;
738 
739 	spin_lock_bh(&dev->mt76.lock);
740 
741 	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
742 	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
743 	if (err < 0)
744 		goto out;
745 
746 	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
747 					cmd);
748 	if (err < 0)
749 		goto out;
750 
751 	if (cmd == SET_KEY)
752 		wcid->cipher |= BIT(cipher);
753 	else
754 		wcid->cipher &= ~BIT(cipher);
755 
756 out:
757 	spin_unlock_bh(&dev->mt76.lock);
758 
759 	return err;
760 }
761 
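/*
 * Prepare a frame for transmission: write the TXWI, build the
 * cut-through TXP descriptor pointing at the frame fragments and
 * allocate a token that is used to match the frame on TX free and
 * TX status events.
 */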
762 int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
763 			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
764 			  struct ieee80211_sta *sta,
765 			  struct mt76_tx_info *tx_info)
766 {
767 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
768 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
769 	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
770 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
771 	struct ieee80211_key_conf *key = info->control.hw_key;
772 	struct ieee80211_vif *vif = info->control.vif;
773 	int i, pid, id, nbuf = tx_info->nbuf - 1;
774 	u8 *txwi = (u8 *)txwi_ptr;
775 	struct mt76_txwi_cache *t;
776 	struct mt7615_txp *txp;
777 
778 	if (!wcid)
779 		wcid = &dev->mt76.global_wcid;
780 
781 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
782 
783 	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
784 		spin_lock_bh(&dev->mt76.lock);
785 		mt7615_mac_set_rates(dev, msta, &info->control.rates[0],
786 				     msta->rates);
787 		msta->rate_probe = true;
788 		spin_unlock_bh(&dev->mt76.lock);
789 	}
790 
791 	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
792 			      pid, key);
793 
794 	txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
795 	for (i = 0; i < nbuf; i++) {
796 		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
797 		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
798 	}
799 	txp->nbuf = nbuf;
800 
801 	/* pass partial skb header to fw */
802 	tx_info->buf[1].len = MT_CT_PARSE_LEN;
803 	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
804 
805 	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
806 
807 	if (!key)
808 		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
809 
810 	if (ieee80211_is_mgmt(hdr->frame_control))
811 		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
812 
813 	if (vif) {
814 		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
815 
816 		txp->bss_idx = mvif->idx;
817 	}
818 
819 	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
820 	t->skb = tx_info->skb;
821 
822 	spin_lock_bh(&dev->token_lock);
823 	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
824 	spin_unlock_bh(&dev->token_lock);
825 	if (id < 0)
826 		return id;
827 
828 	txp->token = cpu_to_le16(id);
829 	txp->rept_wds_wcid = 0xff;
830 	tx_info->skb = DMA_DUMMY_DATA;
831 
832 	return 0;
833 }
834 
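/*
 * Translate a TX status (TXS) event into mac80211 TX info: rebuild the
 * per-rate retry chain from the rate set that was active when the frame
 * was queued and decode the final TX rate reported by the hardware.
 */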
835 static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
836 			    struct ieee80211_tx_info *info, __le32 *txs_data)
837 {
838 	struct ieee80211_supported_band *sband;
839 	struct mt7615_rate_set *rs;
840 	int first_idx = 0, last_idx;
841 	int i, idx, count;
842 	bool fixed_rate, ack_timeout;
843 	bool probe, ampdu, cck = false;
844 	bool rs_idx;
845 	u32 rate_set_tsf;
846 	u32 final_rate, final_rate_flags, final_nss, txs;
847 
848 	fixed_rate = info->status.rates[0].count;
849 	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
850 
851 	txs = le32_to_cpu(txs_data[1]);
852 	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
853 
854 	txs = le32_to_cpu(txs_data[3]);
855 	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
856 	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
857 
858 	txs = le32_to_cpu(txs_data[0]);
859 	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
860 	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
861 
862 	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
863 		return false;
864 
865 	if (txs & MT_TXS0_QUEUE_TIMEOUT)
866 		return false;
867 
868 	if (!ack_timeout)
869 		info->flags |= IEEE80211_TX_STAT_ACK;
870 
871 	info->status.ampdu_len = 1;
872 	info->status.ampdu_ack_len = !!(info->flags &
873 					IEEE80211_TX_STAT_ACK);
874 
875 	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
876 		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
877 
878 	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY);
879 
880 	if (fixed_rate && !probe) {
881 		info->status.rates[0].count = count;
882 		i = 0;
883 		goto out;
884 	}
885 
886 	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
887 	rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
888 			 rate_set_tsf) < 1000000);
889 	rs_idx ^= rate_set_tsf & BIT(0);
890 	rs = &sta->rateset[rs_idx];
891 
892 	if (!first_idx && rs->probe_rate.idx >= 0) {
893 		info->status.rates[0] = rs->probe_rate;
894 
895 		spin_lock_bh(&dev->mt76.lock);
896 		if (sta->rate_probe) {
897 			mt7615_mac_set_rates(dev, sta, NULL, sta->rates);
898 			sta->rate_probe = false;
899 		}
900 		spin_unlock_bh(&dev->mt76.lock);
901 	} else {
902 		info->status.rates[0] = rs->rates[first_idx / 2];
903 	}
904 	info->status.rates[0].count = 0;
905 
906 	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
907 		struct ieee80211_tx_rate *cur_rate;
908 		int cur_count;
909 
910 		cur_rate = &rs->rates[idx / 2];
911 		cur_count = min_t(int, MT7615_RATE_RETRY, count);
912 		count -= cur_count;
913 
914 		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
915 			    cur_rate->flags != info->status.rates[i].flags)) {
916 			i++;
917 			if (i == ARRAY_SIZE(info->status.rates))
918 				break;
919 
920 			info->status.rates[i] = *cur_rate;
921 			info->status.rates[i].count = 0;
922 		}
923 
924 		info->status.rates[i].count += cur_count;
925 	}
926 
927 out:
928 	final_rate_flags = info->status.rates[i].flags;
929 
930 	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
931 	case MT_PHY_TYPE_CCK:
932 		cck = true;
933 		/* fall through */
934 	case MT_PHY_TYPE_OFDM:
935 		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
936 			sband = &dev->mt76.sband_5g.sband;
937 		else
938 			sband = &dev->mt76.sband_2g.sband;
939 		final_rate &= MT_TX_RATE_IDX;
940 		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
941 					   cck);
942 		final_rate_flags = 0;
943 		break;
944 	case MT_PHY_TYPE_HT_GF:
945 	case MT_PHY_TYPE_HT:
946 		final_rate_flags |= IEEE80211_TX_RC_MCS;
947 		final_rate &= MT_TX_RATE_IDX;
948 		if (final_rate > 31)
949 			return false;
950 		break;
951 	case MT_PHY_TYPE_VHT:
952 		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
953 
954 		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
955 			final_nss--;
956 
957 		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
958 		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
959 		break;
960 	default:
961 		return false;
962 	}
963 
964 	info->status.rates[i].idx = final_rate;
965 	info->status.rates[i].flags = final_rate_flags;
966 
967 	return true;
968 }
969 
970 static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
971 				   struct mt7615_sta *sta, int pid,
972 				   __le32 *txs_data)
973 {
974 	struct mt76_dev *mdev = &dev->mt76;
975 	struct sk_buff_head list;
976 	struct sk_buff *skb;
977 
978 	if (pid < MT_PACKET_ID_FIRST)
979 		return false;
980 
981 	mt76_tx_status_lock(mdev, &list);
982 	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
983 	if (skb) {
984 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
985 
986 		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
987 			ieee80211_tx_info_clear_status(info);
988 			info->status.rates[0].idx = -1;
989 		}
990 
991 		mt76_tx_status_skb_done(mdev, skb, &list);
992 	}
993 	mt76_tx_status_unlock(mdev, &list);
994 
995 	return !!skb;
996 }
997 
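/*
 * Handle a TXS event: look up the reporting wcid, try to attach the
 * status to a queued skb identified by its packet ID, and otherwise
 * report it via ieee80211_tx_status_noskb().
 */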
998 void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
999 {
1000 	struct ieee80211_tx_info info = {};
1001 	struct ieee80211_sta *sta = NULL;
1002 	struct mt7615_sta *msta = NULL;
1003 	struct mt76_wcid *wcid;
1004 	__le32 *txs_data = data;
1005 	u32 txs;
1006 	u8 wcidx;
1007 	u8 pid;
1008 
1009 	txs = le32_to_cpu(txs_data[0]);
1010 	pid = FIELD_GET(MT_TXS0_PID, txs);
1011 	txs = le32_to_cpu(txs_data[2]);
1012 	wcidx = FIELD_GET(MT_TXS2_WCID, txs);
1013 
1014 	if (pid == MT_PACKET_ID_NO_ACK)
1015 		return;
1016 
1017 	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
1018 		return;
1019 
1020 	rcu_read_lock();
1021 
1022 	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1023 	if (!wcid)
1024 		goto out;
1025 
1026 	msta = container_of(wcid, struct mt7615_sta, wcid);
1027 	sta = wcid_to_sta(wcid);
1028 
1029 	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
1030 		goto out;
1031 
1032 	if (wcidx >= MT7615_WTBL_STA || !sta)
1033 		goto out;
1034 
1035 	if (mt7615_fill_txs(dev, msta, &info, txs_data))
1036 		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
1037 
1038 out:
1039 	rcu_read_unlock();
1040 }
1041 
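/*
 * Handle a TX free event from the firmware: release every token listed
 * in the message, unmap the frame fragments and complete the associated
 * skbs.
 */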
1042 void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
1043 {
1044 	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
1045 	struct mt76_dev *mdev = &dev->mt76;
1046 	struct mt76_txwi_cache *txwi;
1047 	u8 i, count;
1048 
1049 	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
1050 	for (i = 0; i < count; i++) {
1051 		spin_lock_bh(&dev->token_lock);
1052 		txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
1053 		spin_unlock_bh(&dev->token_lock);
1054 
1055 		if (!txwi)
1056 			continue;
1057 
1058 		mt7615_txp_skb_unmap(mdev, txwi);
1059 		if (txwi->skb) {
1060 			mt76_tx_complete_skb(mdev, txwi->skb);
1061 			txwi->skb = NULL;
1062 		}
1063 
1064 		mt76_put_txwi(mdev, txwi);
1065 	}
1066 	dev_kfree_skb(skb);
1067 }
1068 
1069 static void
1070 mt7615_mac_set_default_sensitivity(struct mt7615_dev *dev)
1071 {
1072 	mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1073 		 MT_WF_PHY_B0_PD_OFDM_MASK,
1074 		 MT_WF_PHY_B0_PD_OFDM(0x13c));
1075 	mt76_rmw(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
1076 		 MT_WF_PHY_B1_PD_OFDM_MASK,
1077 		 MT_WF_PHY_B1_PD_OFDM(0x13c));
1078 
1079 	mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
1080 		 MT_WF_PHY_B0_PD_CCK_MASK,
1081 		 MT_WF_PHY_B0_PD_CCK(0x92));
1082 	mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
1083 		 MT_WF_PHY_B1_PD_CCK_MASK,
1084 		 MT_WF_PHY_B1_PD_CCK(0x92));
1085 
1086 	dev->ofdm_sensitivity = -98;
1087 	dev->cck_sensitivity = -110;
1088 	dev->last_cca_adj = jiffies;
1089 }
1090 
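/*
 * Enable or disable smart carrier sense (SCS): toggle the PHY
 * packet-detection blocking and reset the sensitivity thresholds to
 * their defaults.
 */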
1091 void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
1092 {
1093 	mutex_lock(&dev->mt76.mutex);
1094 
1095 	if (dev->scs_en == enable)
1096 		goto out;
1097 
1098 	if (enable) {
1099 		/* DBDC not supported */
1100 		mt76_set(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1101 			 MT_WF_PHY_B0_PD_BLK);
1102 		if (is_mt7622(&dev->mt76)) {
1103 			mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
1104 			mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
1105 		}
1106 	} else {
1107 		mt76_clear(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1108 			   MT_WF_PHY_B0_PD_BLK);
1109 		mt76_clear(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
1110 			   MT_WF_PHY_B1_PD_BLK);
1111 	}
1112 
1113 	mt7615_mac_set_default_sensitivity(dev);
1114 	dev->scs_en = enable;
1115 
1116 out:
1117 	mutex_unlock(&dev->mt76.mutex);
1118 }
1119 
1120 void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev)
1121 {
1122 	mt76_clear(dev, MT_WF_PHY_R0_B0_PHYMUX_5, GENMASK(22, 20));
1123 	mt76_set(dev, MT_WF_PHY_R0_B0_PHYMUX_5, BIT(22) | BIT(20));
1124 }
1125 
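/*
 * Adapt the OFDM/CCK packet-detection threshold based on the false CCA
 * count and the RTS error rate: raise it (reducing coverage) when false
 * CCA is high, lower it again when false CCA is low or RTS failures are
 * frequent, and never let it exceed the weakest station's average RSSI.
 */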
1126 static void
1127 mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
1128 			      u32 rts_err_rate, bool ofdm)
1129 {
1130 	int false_cca = ofdm ? dev->false_cca_ofdm : dev->false_cca_cck;
	s16 def_th = ofdm ? -98 : -110;
1132 	bool update = false;
1133 	s8 *sensitivity;
1134 	int signal;
1135 
1136 	sensitivity = ofdm ? &dev->ofdm_sensitivity : &dev->cck_sensitivity;
1137 	signal = mt76_get_min_avg_rssi(&dev->mt76);
1138 	if (!signal) {
1139 		mt7615_mac_set_default_sensitivity(dev);
1140 		return;
1141 	}
1142 
1143 	signal = min(signal, -72);
1144 	if (false_cca > 500) {
1145 		if (rts_err_rate > MT_FRAC(40, 100))
1146 			return;
1147 
1148 		/* decrease coverage */
1149 		if (*sensitivity == def_th && signal > -90) {
1150 			*sensitivity = -90;
1151 			update = true;
1152 		} else if (*sensitivity + 2 < signal) {
1153 			*sensitivity += 2;
1154 			update = true;
1155 		}
1156 	} else if ((false_cca > 0 && false_cca < 50) ||
1157 		   rts_err_rate > MT_FRAC(60, 100)) {
1158 		/* increase coverage */
1159 		if (*sensitivity - 2 >= def_th) {
1160 			*sensitivity -= 2;
1161 			update = true;
1162 		}
1163 	}
1164 
1165 	if (*sensitivity > signal) {
1166 		*sensitivity = signal;
1167 		update = true;
1168 	}
1169 
1170 	if (update) {
1171 		u16 val;
1172 
1173 		if (ofdm) {
1174 			/* DBDC not supported */
1175 			val = *sensitivity * 2 + 512;
1176 			mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1177 				 MT_WF_PHY_B0_PD_OFDM_MASK,
1178 				 MT_WF_PHY_B0_PD_OFDM(val));
1179 		} else {
1180 			val = *sensitivity + 256;
1181 			mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
1182 				 MT_WF_PHY_B0_PD_CCK_MASK,
1183 				 MT_WF_PHY_B0_PD_CCK(val));
1184 			mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
1185 				 MT_WF_PHY_B1_PD_CCK_MASK,
1186 				 MT_WF_PHY_B1_PD_CCK(val));
1187 		}
1188 		dev->last_cca_adj = jiffies;
1189 	}
1190 }
1191 
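/*
 * Periodic SCS check: derive false CCA counts from the PD and MDRDY
 * counters, sample the RTS statistics, run the sensitivity adjustment
 * for CCK and OFDM, and fall back to the default thresholds if nothing
 * was adjusted for 10 seconds.
 */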
1192 static void
1193 mt7615_mac_scs_check(struct mt7615_dev *dev)
1194 {
1195 	u32 val, rts_cnt = 0, rts_retries_cnt = 0, rts_err_rate = 0;
1196 	u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
1197 	int i;
1198 
1199 	if (!dev->scs_en)
1200 		return;
1201 
1202 	for (i = 0; i < 4; i++) {
1203 		u32 data;
1204 
1205 		val = mt76_rr(dev, MT_MIB_MB_SDR0(i));
1206 		data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
1207 		if (data > rts_retries_cnt) {
1208 			rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
1209 			rts_retries_cnt = data;
1210 		}
1211 	}
1212 
1213 	val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS0);
1214 	pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
1215 	pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
1216 
1217 	val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS5);
1218 	mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
1219 	mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
1220 
1221 	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
1222 	dev->false_cca_cck = pd_cck - mdrdy_cck;
1223 	mt7615_mac_cca_stats_reset(dev);
1224 
1225 	if (rts_cnt + rts_retries_cnt)
1226 		rts_err_rate = MT_FRAC(rts_retries_cnt,
1227 				       rts_cnt + rts_retries_cnt);
1228 
1229 	/* cck */
1230 	mt7615_mac_adjust_sensitivity(dev, rts_err_rate, false);
1231 	/* ofdm */
1232 	mt7615_mac_adjust_sensitivity(dev, rts_err_rate, true);
1233 
1234 	if (time_after(jiffies, dev->last_cca_adj + 10 * HZ))
1235 		mt7615_mac_set_default_sensitivity(dev);
1236 }
1237 
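/*
 * Accumulate channel busy and active time from the MIB counters into
 * the survey statistics for the current channel.
 */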
1238 void mt7615_update_channel(struct mt76_dev *mdev)
1239 {
1240 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
1241 	struct mt76_channel_state *state;
1242 	ktime_t cur_time;
1243 	u32 busy;
1244 
1245 	if (!test_bit(MT76_STATE_RUNNING, &mdev->state))
1246 		return;
1247 
1248 	state = mt76_channel_state(mdev, mdev->chandef.chan);
1249 	/* TODO: add DBDC support */
1250 	busy = mt76_get_field(dev, MT_MIB_SDR16(0), MT_MIB_BUSY_MASK);
1251 
1252 	spin_lock_bh(&mdev->cc_lock);
1253 	cur_time = ktime_get_boottime();
1254 	state->cc_busy += busy;
1255 	state->cc_active += ktime_to_us(ktime_sub(cur_time,
1256 						  mdev->survey_time));
1257 	mdev->survey_time = cur_time;
1258 	spin_unlock_bh(&mdev->cc_lock);
1259 }
1260 
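/*
 * Periodic MAC housekeeping: update survey statistics, run the SCS
 * check every fifth iteration and expire stale TX status entries.
 */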
1261 void mt7615_mac_work(struct work_struct *work)
1262 {
1263 	struct mt7615_dev *dev;
1264 
1265 	dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
1266 						mac_work.work);
1267 
1268 	mutex_lock(&dev->mt76.mutex);
1269 	mt7615_update_channel(&dev->mt76);
1270 	if (++dev->mac_work_count == 5) {
1271 		mt7615_mac_scs_check(dev);
1272 		dev->mac_work_count = 0;
1273 	}
1274 	mutex_unlock(&dev->mt76.mutex);
1275 
1276 	mt76_tx_status_check(&dev->mt76, NULL, false);
1277 	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
1278 				     MT7615_WATCHDOG_TIME);
1279 }
1280 
1281 int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev)
1282 {
1283 	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
1284 	int err;
1285 
1286 	err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD0,
1287 				 MT_RX_SEL0, 0);
1288 	if (err < 0)
1289 		return err;
1290 
1291 	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
1292 	    chandef->width == NL80211_CHAN_WIDTH_80P80)
1293 		err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD1,
1294 					 MT_RX_SEL0, 0);
1295 	return err;
1296 }
1297 
1298 static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
1299 {
1300 	int err;
1301 
1302 	err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
1303 	if (err < 0)
1304 		return err;
1305 
1306 	return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
1307 				  MT_RX_SEL0, 1);
1308 }
1309 
1310 int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev)
1311 {
1312 	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
1313 	int err;
1314 
1315 	/* start CAC */
1316 	err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, MT_HW_RDD0,
1317 				 MT_RX_SEL0, 0);
1318 	if (err < 0)
1319 		return err;
1320 
1321 	/* TODO: DBDC support */
1322 
1323 	err = mt7615_dfs_start_rdd(dev, MT_HW_RDD0);
1324 	if (err < 0)
1325 		return err;
1326 
1327 	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
1328 	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
1329 		err = mt7615_dfs_start_rdd(dev, MT_HW_RDD1);
1330 		if (err < 0)
1331 			return err;
1332 	}
1333 
1334 	return 0;
1335 }
1336 
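/*
 * Reconfigure the radar detector when the channel's DFS state changes:
 * start CAC and the RDD engines on radar channels (or end CAC once the
 * channel is available), and return to normal RX otherwise.
 */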
1337 int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev)
1338 {
1339 	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
1340 	int err;
1341 
1342 	if (dev->mt76.region == NL80211_DFS_UNSET)
1343 		return 0;
1344 
1345 	if (test_bit(MT76_SCANNING, &dev->mt76.state))
1346 		return 0;
1347 
1348 	if (dev->dfs_state == chandef->chan->dfs_state)
1349 		return 0;
1350 
1351 	dev->dfs_state = chandef->chan->dfs_state;
1352 
1353 	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
1354 		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
1355 			return mt7615_dfs_start_radar_detector(dev);
1356 		else
1357 			return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
1358 						  MT_RX_SEL0, 0);
1359 	} else {
1360 		err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START,
1361 					 MT_HW_RDD0, MT_RX_SEL0, 0);
1362 		if (err < 0)
1363 			return err;
1364 
1365 		return mt7615_dfs_stop_radar_detector(dev);
1366 	}
1367 }
1368