/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76x02.h"
#include "mt76x02_trace.h"

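/*
 * Map a mac80211 key to the on-chip cipher type and copy the key
 * material into key_data, zero-padded to the 32-byte key slot.
 * Returns MT_CIPHER_NONE for a missing key or an unsupported cipher.
 */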
static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

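/*
 * Program a per-VIF shared (group) key: update the cipher mode field
 * for this vif/key slot in MT_SKEY_MODE and write the key material
 * into the matching MT_SKEY entry.
 */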
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);

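/*
 * Read back the IV/EIV words the hardware maintains for this WCID and
 * rebuild the current packet number from them, so mac80211's tx_pn
 * counter stays in sync with the hardware state. TKIP keeps its TSC
 * bytes in a different IV layout than CCMP, hence the per-cipher
 * unpacking.
 */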
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	pn = (u64)eiv << 16;
	if (cipher == MT_CIPHER_TKIP) {
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT_CIPHER_AES_CCMP) {
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}

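/*
 * Program a per-station key for a WCID entry: write the key material
 * and cipher mode, flag the key as pairwise if needed, and seed the
 * WCID IV/EIV words from the key's current tx_pn so hardware-generated
 * IVs continue the existing sequence.
 */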
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		pn = atomic64_read(&key->tx_pn);

		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP) {
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

		if (cipher == MT_CIPHER_TKIP) {
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT_CIPHER_AES_CCMP) {
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}

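/*
 * Initialize a WCID table entry: bind it to its BSS index (split
 * across the BSS_IDX and BSS_IDX_EXT fields) and, for the first 128
 * entries that have an address slot, program the station MAC address.
 */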
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
			    u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);

void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}

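/*
 * Encode a mac80211 tx rate into the 16-bit hardware rate value used
 * in the TXWI and WCID tx info: rate index, PHY type, bandwidth and
 * short-GI flag. For legacy rates the PHY type and index are recovered
 * from the hw_value stored in the driver's rate table. The number of
 * spatial streams implied by the MCS index is returned via nss_val.
 */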
static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
			const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u8 phy, rate_idx, nss, bw = 0;
	u16 rateval;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		nss = 1;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}

void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
			       const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	__le16 rateval;
	u32 tx_info;
	u8 nss;

	rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
	tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
		  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
		  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
		  MT_WCID_TX_INFO_SET;
	wcid->tx_info = tx_info;
}

void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
{
	if (enable)
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
	else
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}

bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);

	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}

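/*
 * Decode a hardware rate value from a tx status report back into a
 * mac80211 ieee80211_tx_rate. Returns -EINVAL for an unknown PHY type
 * or bandwidth.
 */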
static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			    enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		/* fall through */
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}

void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta, int len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	u32 wcid_tx_info;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;

	memset(txwi, 0, sizeof(*txwi));

	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
	    ieee80211_has_protected(hdr->frame_control)) {
		wcid = NULL;
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
	}

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);
		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}

	if (wcid && (rate->idx < 0 || !rate->count)) {
		wcid_tx_info = wcid->tx_info;
		txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
					  wcid_tx_info);
		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
	} else {
		txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	}

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
					     max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
				    sta->ht_cap.ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);

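/*
 * Fill rates[idx] with the fallback rate derived from rates[idx - 1]:
 * VHT steps down one MCS (or one spatial stream once MCS 0 is
 * reached), HT MCS 8 falls back to MCS 0, and everything else steps
 * down one rate index.
 */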
static void
mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
{
	u8 mcs, nss;

	if (!idx)
		return;

	rates += idx - 1;
	rates[1] = rates[0];
	switch (phy) {
	case MT_PHY_TYPE_VHT:
		mcs = ieee80211_rate_get_vht_mcs(rates);
		nss = ieee80211_rate_get_vht_nss(rates);

		if (mcs == 0)
			nss = max_t(int, nss - 1, 1);
		else
			mcs--;

		ieee80211_rate_set_vht(rates + 1, mcs, nss);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		/* MCS 8 falls back to MCS 0 */
		if (rates[0].idx == 8) {
			rates[1].idx = 0;
			break;
		}
		/* fall through */
	default:
		rates[1].idx = max_t(int, rates[0].idx - 1, 0);
		break;
	}
}

static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	struct ieee80211_tx_rate last_rate;
	u16 first_rate;
	int retry = st->retry;
	int phy;
	int i;

	if (!n_frames)
		return;

	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);

	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
		first_rate = st->rate & ~MT_RXWI_RATE_INDEX;
		first_rate |= st->pktid & MT_RXWI_RATE_INDEX;

		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
					    dev->mt76.chandef.chan->band);
	} else if (rate[0].idx < 0) {
		if (!msta)
			return;

		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
					    dev->mt76.chandef.chan->band);
	}

	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
				    dev->mt76.chandef.chan->band);

	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		retry--;
		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
			info->status.rates[i] = last_rate;
			info->status.rates[i].count = max_t(int, retry, 1);
			break;
		}

		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
		if (info->status.rates[i].idx == last_rate.idx)
			break;
	}

	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
		info->status.rates[i + 1].idx = -1;
		info->status.rates[i + 1].count = 0;
	}

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}

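/*
 * Report a tx status FIFO entry to mac80211. Consecutive A-MPDU status
 * entries carrying identical rate and retry info are accumulated in
 * msta->status and flushed as a single report covering n_frames
 * subframes once the status changes, the batch reaches 32 frames, or
 * an update is forced via *update.
 */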
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	if (wcid) {
		if (mt76_is_skb_pktid(stat->pktid))
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
		mt76_tx_status_unlock(mdev, &list);
		rcu_read_unlock();
		return;
	}

	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32) stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32) msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			rcu_read_unlock();
			return;
		}

		mt76x02_mac_fill_tx_status(dev, msta, status.info,
					   &msta->status, msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb)
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb)
		ieee80211_tx_status_ext(mt76_hw(dev), &status);
	rcu_read_unlock();
}

static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->mt76.chainmask & 0xf;

		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}

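/*
 * Program the device MAC address and BSSID base registers, falling
 * back to a random address when the provided one is invalid. MBSS
 * mode is configured for 8 APs + 8 STAs and all 16 BSSID slots are
 * cleared.
 */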
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mt76.macaddr, addr);

	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
		eth_random_addr(dev->mt76.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mt76.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);

static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}

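/*
 * Parse the RXWI descriptor of a received frame: strip L2 padding and
 * the security IV, record decryption state, per-chain RSSI, rate and
 * A-MPDU info in the mt76_rx_status kept in skb->cb, and trim the skb
 * to the MPDU length reported by the hardware.
 */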
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return -EINVAL;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x02_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);

	status->chains = BIT(0);
	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[0] = signal;
	if (nstreams > 1) {
		status->chains |= BIT(1);
		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
							       rxwi->rssi[1],
							       1);
		signal = max_t(s8, signal, status->chain_signal[1]);
	}
	status->signal = signal;
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;

	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x02_mac_process_rate(dev, status, rate);
}

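/*
 * Drain the hardware tx status FIFO. When called from IRQ context the
 * entries are only queued into the software kfifo (stopping once it is
 * full); otherwise each entry is reported to mac80211 immediately.
 */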
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}

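/*
 * Tx DMA completion hook: pick up any pending tx status entries, trace
 * the completed TXWI and hand the skb back to the mt76 core.
 */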
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			     struct mt76_queue_entry *e)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_txwi *txwi;
	u8 *txwi_ptr;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	mt76x02_mac_poll_tx_status(dev, false);

	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
	txwi = (struct mt76x02_txwi *)txwi_ptr;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);

	mt76_tx_complete_skb(mdev, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);

void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}

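/*
 * Rewrite the six legacy/HT protection registers (starting at
 * MT_CCK_PROT_CFG) and the three VHT protection registers from the HT
 * operation mode: select CTS-to-self or RTS/CTS based on the RTS
 * threshold, pick CCK or OFDM protection rates depending on
 * legacy_prot, and enable RTS/CTS for the modes required by the HT
 * protection field and the presence of non-greenfield stations.
 */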
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	if (non_gf) {
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}

void mt76x02_update_channel(struct mt76_dev *mdev)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76_channel_state *state;
	u32 active, busy;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);

	busy = mt76_rr(dev, MT_CH_BUSY);
	active = busy + mt76_rr(dev, MT_CH_IDLE);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_busy += busy;
	state->cc_active += active;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);

static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, 0x10f4);

	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
		return;

	dev_err(dev->mt76.dev, "mac specific condition occurred\n");

	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
	udelay(10);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}

static void
mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
{
	if (enable) {
		u32 data;

		mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* enable pa-lna */
		data = mt76_rr(dev, MT_TX_PIN_CFG);
		data |= MT_TX_PIN_CFG_TXANT |
			MT_TX_PIN_CFG_RXANT |
			MT_TX_PIN_RFTR_EN |
			MT_TX_PIN_TRSW_EN;
		mt76_wr(dev, MT_TX_PIN_CFG, data);
	} else {
		mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* disable pa-lna */
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
	}
	dev->ed_tx_blocked = !enable;
}

void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
	dev->ed_trigger = 0;
	dev->ed_silent = 0;

	if (dev->ed_monitor) {
		struct ieee80211_channel *chan = dev->mt76.chandef.chan;
		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;

		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
			 ed_th << 8 | ed_th);
		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
	} else {
		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		if (is_mt76x2(dev)) {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
			mt76_set(dev, MT_TXOP_HLDR_ET,
				 MT_TXOP_HLDR_TX40M_BLK_EN);
		} else {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
			mt76_clear(dev, MT_TXOP_HLDR_ET,
				   MT_TXOP_HLDR_TX40M_BLK_EN);
		}
	}
	mt76x02_edcca_tx_enable(dev, true);
	dev->ed_monitor_learning = true;

	/* clear previous CCA timer value */
	mt76_rr(dev, MT_ED_CCA_TIMER);
	dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);

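/*
 * EDCCA tuning used by mt76x02_edcca_check() below: the busy
 * percentage above which a poll counts as a trigger, the number of
 * consecutive triggered or silent polls that toggle tx blocking, and
 * the false-CCA/trigger limits plus timeout that control the learning
 * phase.
 */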
#define MT_EDCCA_TH		92
#define MT_EDCCA_BLOCK_TH	2
#define MT_EDCCA_LEARN_TH	50
#define MT_EDCCA_LEARN_CCA	180
#define MT_EDCCA_LEARN_TIMEOUT	(20 * HZ)

static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
	ktime_t cur_time;
	u32 active, val, busy;

	cur_time = ktime_get_boottime();
	val = mt76_rr(dev, MT_ED_CCA_TIMER);

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	busy = (val * 100) / active;
	busy = min_t(u32, busy, 100);

	if (busy > MT_EDCCA_TH) {
		dev->ed_trigger++;
		dev->ed_silent = 0;
	} else {
		dev->ed_silent++;
		dev->ed_trigger = 0;
	}

	if (dev->cal.agc_lowest_gain &&
	    dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
	    dev->ed_trigger > MT_EDCCA_LEARN_TH) {
		dev->ed_monitor_learning = false;
		dev->ed_trigger_timeout = jiffies + MT_EDCCA_LEARN_TIMEOUT;
	} else if (!dev->ed_monitor_learning &&
		   time_is_after_jiffies(dev->ed_trigger_timeout)) {
		dev->ed_monitor_learning = true;
		mt76x02_edcca_tx_enable(dev, true);
	}

	if (dev->ed_monitor_learning)
		return;

	if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, false);
	else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, true);
}

void mt76x02_mac_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       mt76.mac_work.work);
	int i, idx;

	mutex_lock(&dev->mt76.mutex);

	mt76x02_update_channel(&dev->mt76);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->aggr_stats[idx++] += val & 0xffff;
		dev->aggr_stats[idx++] += val >> 16;
	}

	if (!dev->mt76.beacon_mask)
		mt76x02_check_mac_err(dev);

	if (dev->ed_monitor)
		mt76x02_edcca_check(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt76_tx_status_check(&dev->mt76, NULL, false);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
}

void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}