1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include <net/page_pool.h>
8 #include "mt76.h"
9 
10 #define CHAN2G(_idx, _freq) {			\
11 	.band = NL80211_BAND_2GHZ,		\
12 	.center_freq = (_freq),			\
13 	.hw_value = (_idx),			\
14 	.max_power = 30,			\
15 }
16 
17 #define CHAN5G(_idx, _freq) {			\
18 	.band = NL80211_BAND_5GHZ,		\
19 	.center_freq = (_freq),			\
20 	.hw_value = (_idx),			\
21 	.max_power = 30,			\
22 }
23 
24 #define CHAN6G(_idx, _freq) {			\
25 	.band = NL80211_BAND_6GHZ,		\
26 	.center_freq = (_freq),			\
27 	.hw_value = (_idx),			\
28 	.max_power = 30,			\
29 }
30 
31 static const struct ieee80211_channel mt76_channels_2ghz[] = {
32 	CHAN2G(1, 2412),
33 	CHAN2G(2, 2417),
34 	CHAN2G(3, 2422),
35 	CHAN2G(4, 2427),
36 	CHAN2G(5, 2432),
37 	CHAN2G(6, 2437),
38 	CHAN2G(7, 2442),
39 	CHAN2G(8, 2447),
40 	CHAN2G(9, 2452),
41 	CHAN2G(10, 2457),
42 	CHAN2G(11, 2462),
43 	CHAN2G(12, 2467),
44 	CHAN2G(13, 2472),
45 	CHAN2G(14, 2484),
46 };
47 
48 static const struct ieee80211_channel mt76_channels_5ghz[] = {
49 	CHAN5G(36, 5180),
50 	CHAN5G(40, 5200),
51 	CHAN5G(44, 5220),
52 	CHAN5G(48, 5240),
53 
54 	CHAN5G(52, 5260),
55 	CHAN5G(56, 5280),
56 	CHAN5G(60, 5300),
57 	CHAN5G(64, 5320),
58 
59 	CHAN5G(100, 5500),
60 	CHAN5G(104, 5520),
61 	CHAN5G(108, 5540),
62 	CHAN5G(112, 5560),
63 	CHAN5G(116, 5580),
64 	CHAN5G(120, 5600),
65 	CHAN5G(124, 5620),
66 	CHAN5G(128, 5640),
67 	CHAN5G(132, 5660),
68 	CHAN5G(136, 5680),
69 	CHAN5G(140, 5700),
70 	CHAN5G(144, 5720),
71 
72 	CHAN5G(149, 5745),
73 	CHAN5G(153, 5765),
74 	CHAN5G(157, 5785),
75 	CHAN5G(161, 5805),
76 	CHAN5G(165, 5825),
77 	CHAN5G(169, 5845),
78 	CHAN5G(173, 5865),
79 };
80 
81 static const struct ieee80211_channel mt76_channels_6ghz[] = {
82 	/* UNII-5 */
83 	CHAN6G(1, 5955),
84 	CHAN6G(5, 5975),
85 	CHAN6G(9, 5995),
86 	CHAN6G(13, 6015),
87 	CHAN6G(17, 6035),
88 	CHAN6G(21, 6055),
89 	CHAN6G(25, 6075),
90 	CHAN6G(29, 6095),
91 	CHAN6G(33, 6115),
92 	CHAN6G(37, 6135),
93 	CHAN6G(41, 6155),
94 	CHAN6G(45, 6175),
95 	CHAN6G(49, 6195),
96 	CHAN6G(53, 6215),
97 	CHAN6G(57, 6235),
98 	CHAN6G(61, 6255),
99 	CHAN6G(65, 6275),
100 	CHAN6G(69, 6295),
101 	CHAN6G(73, 6315),
102 	CHAN6G(77, 6335),
103 	CHAN6G(81, 6355),
104 	CHAN6G(85, 6375),
105 	CHAN6G(89, 6395),
106 	CHAN6G(93, 6415),
107 	/* UNII-6 */
108 	CHAN6G(97, 6435),
109 	CHAN6G(101, 6455),
110 	CHAN6G(105, 6475),
111 	CHAN6G(109, 6495),
112 	CHAN6G(113, 6515),
113 	CHAN6G(117, 6535),
114 	/* UNII-7 */
115 	CHAN6G(121, 6555),
116 	CHAN6G(125, 6575),
117 	CHAN6G(129, 6595),
118 	CHAN6G(133, 6615),
119 	CHAN6G(137, 6635),
120 	CHAN6G(141, 6655),
121 	CHAN6G(145, 6675),
122 	CHAN6G(149, 6695),
123 	CHAN6G(153, 6715),
124 	CHAN6G(157, 6735),
125 	CHAN6G(161, 6755),
126 	CHAN6G(165, 6775),
127 	CHAN6G(169, 6795),
128 	CHAN6G(173, 6815),
129 	CHAN6G(177, 6835),
130 	CHAN6G(181, 6855),
131 	CHAN6G(185, 6875),
132 	/* UNII-8 */
133 	CHAN6G(189, 6895),
134 	CHAN6G(193, 6915),
135 	CHAN6G(197, 6935),
136 	CHAN6G(201, 6955),
137 	CHAN6G(205, 6975),
138 	CHAN6G(209, 6995),
139 	CHAN6G(213, 7015),
140 	CHAN6G(217, 7035),
141 	CHAN6G(221, 7055),
142 	CHAN6G(225, 7075),
143 	CHAN6G(229, 7095),
144 	CHAN6G(233, 7115),
145 };
146 
147 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
148 	{ .throughput =   0 * 1024, .blink_time = 334 },
149 	{ .throughput =   1 * 1024, .blink_time = 260 },
150 	{ .throughput =   5 * 1024, .blink_time = 220 },
151 	{ .throughput =  10 * 1024, .blink_time = 190 },
152 	{ .throughput =  20 * 1024, .blink_time = 170 },
153 	{ .throughput =  50 * 1024, .blink_time = 150 },
154 	{ .throughput =  70 * 1024, .blink_time = 130 },
155 	{ .throughput = 100 * 1024, .blink_time = 110 },
156 	{ .throughput = 200 * 1024, .blink_time =  80 },
157 	{ .throughput = 300 * 1024, .blink_time =  50 },
158 };
159 
160 struct ieee80211_rate mt76_rates[] = {
161 	CCK_RATE(0, 10),
162 	CCK_RATE(1, 20),
163 	CCK_RATE(2, 55),
164 	CCK_RATE(3, 110),
165 	OFDM_RATE(11, 60),
166 	OFDM_RATE(15, 90),
167 	OFDM_RATE(10, 120),
168 	OFDM_RATE(14, 180),
169 	OFDM_RATE(9,  240),
170 	OFDM_RATE(13, 360),
171 	OFDM_RATE(8,  480),
172 	OFDM_RATE(12, 540),
173 };
174 EXPORT_SYMBOL_GPL(mt76_rates);
175 
176 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
177 	{ .start_freq = 2402, .end_freq = 2494, },
178 	{ .start_freq = 5150, .end_freq = 5350, },
179 	{ .start_freq = 5350, .end_freq = 5470, },
180 	{ .start_freq = 5470, .end_freq = 5725, },
181 	{ .start_freq = 5725, .end_freq = 5950, },
182 	{ .start_freq = 5945, .end_freq = 6165, },
183 	{ .start_freq = 6165, .end_freq = 6405, },
184 	{ .start_freq = 6405, .end_freq = 6525, },
185 	{ .start_freq = 6525, .end_freq = 6705, },
186 	{ .start_freq = 6705, .end_freq = 6865, },
187 	{ .start_freq = 6865, .end_freq = 7125, },
188 };
189 
190 static const struct cfg80211_sar_capa mt76_sar_capa = {
191 	.type = NL80211_SAR_TYPE_POWER,
192 	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
193 	.freq_ranges = &mt76_sar_freq_ranges[0],
194 };
195 
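/* Register an LED classdev for this phy when the driver provides LED ops,
 * attach a throughput based blink trigger and, for the primary phy, pick up
 * the optional "led" OF node (pin and polarity).
 */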
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 
201 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
202 		return 0;
203 
204 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
205 		 wiphy_name(hw->wiphy));
206 
207 	phy->leds.cdev.name = phy->leds.name;
208 	phy->leds.cdev.default_trigger =
209 		ieee80211_create_tpt_led_trigger(hw,
210 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
211 					mt76_tpt_blink,
212 					ARRAY_SIZE(mt76_tpt_blink));
213 
214 	if (phy == &dev->phy) {
215 		struct device_node *np = dev->dev->of_node;
216 
217 		np = of_get_child_by_name(np, "led");
218 		if (np) {
219 			int led_pin;
220 
221 			if (!of_property_read_u32(np, "led-sources", &led_pin))
222 				phy->leds.pin = led_pin;
223 			phy->leds.al = of_property_read_bool(np,
224 							     "led-active-low");
225 			of_node_put(np);
226 		}
227 	}
228 
229 	return led_classdev_register(dev->dev, &phy->leds.cdev);
230 }
231 
232 static void mt76_led_cleanup(struct mt76_phy *phy)
233 {
234 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
235 		return;
236 
237 	led_classdev_unregister(&phy->leds.cdev);
238 }
239 
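/* Adjust HT/VHT STBC capabilities and MCS maps to match the number of
 * spatial streams derived from the phy antenna mask.
 */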
240 static void mt76_init_stream_cap(struct mt76_phy *phy,
241 				 struct ieee80211_supported_band *sband,
242 				 bool vht)
243 {
244 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
245 	int i, nstream = hweight8(phy->antenna_mask);
246 	struct ieee80211_sta_vht_cap *vht_cap;
247 	u16 mcs_map = 0;
248 
249 	if (nstream > 1)
250 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
251 	else
252 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
253 
254 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
255 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
256 
257 	if (!vht)
258 		return;
259 
260 	vht_cap = &sband->vht_cap;
261 	if (nstream > 1)
262 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
263 	else
264 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
265 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
266 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
267 
268 	for (i = 0; i < 8; i++) {
269 		if (i < nstream)
270 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
271 		else
272 			mcs_map |=
273 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
274 	}
275 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
276 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
277 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
278 		vht_cap->vht_mcs.tx_highest |=
279 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
280 }
281 
282 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
283 {
284 	if (phy->cap.has_2ghz)
285 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
286 	if (phy->cap.has_5ghz)
287 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
288 	if (phy->cap.has_6ghz)
289 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
290 }
291 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
292 
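/* Common sband setup: duplicate the channel template, allocate per-channel
 * state and fill in the default HT/VHT capabilities shared by all drivers.
 */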
293 static int
294 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
295 		const struct ieee80211_channel *chan, int n_chan,
296 		struct ieee80211_rate *rates, int n_rates,
297 		bool ht, bool vht)
298 {
299 	struct ieee80211_supported_band *sband = &msband->sband;
300 	struct ieee80211_sta_vht_cap *vht_cap;
301 	struct ieee80211_sta_ht_cap *ht_cap;
302 	struct mt76_dev *dev = phy->dev;
303 	void *chanlist;
304 	int size;
305 
306 	size = n_chan * sizeof(*chan);
307 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
308 	if (!chanlist)
309 		return -ENOMEM;
310 
311 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
312 				    GFP_KERNEL);
313 	if (!msband->chan)
314 		return -ENOMEM;
315 
316 	sband->channels = chanlist;
317 	sband->n_channels = n_chan;
318 	sband->bitrates = rates;
319 	sband->n_bitrates = n_rates;
320 
321 	if (!ht)
322 		return 0;
323 
324 	ht_cap = &sband->ht_cap;
325 	ht_cap->ht_supported = true;
326 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
327 		       IEEE80211_HT_CAP_GRN_FLD |
328 		       IEEE80211_HT_CAP_SGI_20 |
329 		       IEEE80211_HT_CAP_SGI_40 |
330 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
331 
332 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
333 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
334 
335 	mt76_init_stream_cap(phy, sband, vht);
336 
337 	if (!vht)
338 		return 0;
339 
340 	vht_cap = &sband->vht_cap;
341 	vht_cap->vht_supported = true;
342 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
343 			IEEE80211_VHT_CAP_RXSTBC_1 |
344 			IEEE80211_VHT_CAP_SHORT_GI_80 |
345 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
346 
347 	return 0;
348 }
349 
350 static int
351 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
352 		   int n_rates)
353 {
354 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
355 
356 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
357 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
358 			       n_rates, true, false);
359 }
360 
361 static int
362 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates, bool vht)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
368 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
369 			       n_rates, true, vht);
370 }
371 
372 static int
373 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
379 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
380 			       n_rates, false, false);
381 }
382 
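/* Called after wiphy_read_of_freq_limits(): keep the band only if at least
 * one channel is still enabled (its first channel becomes the default
 * chandef/channel state), otherwise drop the band from the wiphy.
 */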
383 static void
384 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
385 		 enum nl80211_band band)
386 {
387 	struct ieee80211_supported_band *sband = &msband->sband;
388 	bool found = false;
389 	int i;
390 
391 	if (!sband)
392 		return;
393 
394 	for (i = 0; i < sband->n_channels; i++) {
395 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
396 			continue;
397 
398 		found = true;
399 		break;
400 	}
401 
402 	if (found) {
403 		phy->chandef.chan = &sband->channels[0];
404 		phy->chan_state = &msband->chan[0];
405 		return;
406 	}
407 
408 	sband->n_channels = 0;
409 	phy->hw->wiphy->bands[band] = NULL;
410 }
411 
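/* hw/wiphy capability setup common to all mt76 phys, including allocation of
 * the per-frequency-range SAR power table.
 */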
412 static int
413 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
414 {
415 	struct mt76_dev *dev = phy->dev;
416 	struct wiphy *wiphy = hw->wiphy;
417 
418 	SET_IEEE80211_DEV(hw, dev->dev);
419 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
420 
421 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
422 			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
423 	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
424 			WIPHY_FLAG_SUPPORTS_TDLS |
425 			WIPHY_FLAG_AP_UAPSD;
426 
427 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
428 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
429 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
430 
431 	wiphy->available_antennas_tx = phy->antenna_mask;
432 	wiphy->available_antennas_rx = phy->antenna_mask;
433 
434 	wiphy->sar_capa = &mt76_sar_capa;
435 	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
436 				sizeof(struct mt76_freq_range_power),
437 				GFP_KERNEL);
438 	if (!phy->frp)
439 		return -ENOMEM;
440 
441 	hw->txq_data_size = sizeof(struct mt76_txq);
442 	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
443 
444 	if (!hw->max_tx_fragments)
445 		hw->max_tx_fragments = 16;
446 
447 	ieee80211_hw_set(hw, SIGNAL_DBM);
448 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
449 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
450 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
451 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
452 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
453 	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
454 
455 	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
456 		ieee80211_hw_set(hw, TX_AMSDU);
457 		ieee80211_hw_set(hw, TX_FRAG_LIST);
458 	}
459 
460 	ieee80211_hw_set(hw, MFP_CAPABLE);
461 	ieee80211_hw_set(hw, AP_LINK_PS);
462 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
463 
464 	return 0;
465 }
466 
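/* Allocate an extra ieee80211_hw/mt76_phy for a secondary band. The driver
 * private area is placed right after the (aligned) struct mt76_phy.
 */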
467 struct mt76_phy *
468 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
469 	       const struct ieee80211_ops *ops, u8 band_idx)
470 {
471 	struct ieee80211_hw *hw;
472 	unsigned int phy_size;
473 	struct mt76_phy *phy;
474 
475 	phy_size = ALIGN(sizeof(*phy), 8);
476 	hw = ieee80211_alloc_hw(size + phy_size, ops);
477 	if (!hw)
478 		return NULL;
479 
480 	phy = hw->priv;
481 	phy->dev = dev;
482 	phy->hw = hw;
483 	phy->priv = hw->priv + phy_size;
484 	phy->band_idx = band_idx;
485 
486 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
487 	hw->wiphy->interface_modes =
488 		BIT(NL80211_IFTYPE_STATION) |
489 		BIT(NL80211_IFTYPE_AP) |
490 #ifdef CONFIG_MAC80211_MESH
491 		BIT(NL80211_IFTYPE_MESH_POINT) |
492 #endif
493 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
494 		BIT(NL80211_IFTYPE_P2P_GO) |
495 		BIT(NL80211_IFTYPE_ADHOC);
496 
497 	return phy;
498 }
499 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
500 
501 int mt76_register_phy(struct mt76_phy *phy, bool vht,
502 		      struct ieee80211_rate *rates, int n_rates)
503 {
504 	int ret;
505 
506 	ret = mt76_phy_init(phy, phy->hw);
507 	if (ret)
508 		return ret;
509 
510 	if (phy->cap.has_2ghz) {
511 		ret = mt76_init_sband_2g(phy, rates, n_rates);
512 		if (ret)
513 			return ret;
514 	}
515 
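	/* skip the first four (CCK) entries of the rate table on 5/6 GHz */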
516 	if (phy->cap.has_5ghz) {
517 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
518 		if (ret)
519 			return ret;
520 	}
521 
522 	if (phy->cap.has_6ghz) {
523 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
524 		if (ret)
525 			return ret;
526 	}
527 
528 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
529 		ret = mt76_led_init(phy);
530 		if (ret)
531 			return ret;
532 	}
533 
534 	wiphy_read_of_freq_limits(phy->hw->wiphy);
535 	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
536 	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
537 	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
538 
539 	ret = ieee80211_register_hw(phy->hw);
540 	if (ret)
541 		return ret;
542 
543 	set_bit(MT76_STATE_REGISTERED, &phy->state);
544 	phy->dev->phys[phy->band_idx] = phy;
545 
546 	return 0;
547 }
548 EXPORT_SYMBOL_GPL(mt76_register_phy);
549 
550 void mt76_unregister_phy(struct mt76_phy *phy)
551 {
552 	struct mt76_dev *dev = phy->dev;
553 
554 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
555 		return;
556 
557 	if (IS_ENABLED(CONFIG_MT76_LEDS))
558 		mt76_led_cleanup(phy);
559 	mt76_tx_status_check(dev, true);
560 	ieee80211_unregister_hw(phy->hw);
561 	dev->phys[phy->band_idx] = NULL;
562 }
563 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
564 
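/* Create a page pool for an RX queue: data queues get a larger pool and, on
 * MMIO devices, the pool also takes care of DMA mapping/syncing.
 */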
565 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
566 {
567 	struct page_pool_params pp_params = {
568 		.order = 0,
569 		.flags = PP_FLAG_PAGE_FRAG,
570 		.nid = NUMA_NO_NODE,
571 		.dev = dev->dma_dev,
572 	};
573 	int idx = q - dev->q_rx;
574 
575 	switch (idx) {
576 	case MT_RXQ_MAIN:
577 	case MT_RXQ_BAND1:
578 	case MT_RXQ_BAND2:
579 		pp_params.pool_size = 256;
580 		break;
581 	default:
582 		pp_params.pool_size = 16;
583 		break;
584 	}
585 
586 	if (mt76_is_mmio(dev)) {
587 		/* rely on page_pool for DMA mapping */
588 		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
589 		pp_params.dma_dir = DMA_FROM_DEVICE;
590 		pp_params.max_len = PAGE_SIZE;
591 		pp_params.offset = 0;
592 	}
593 
594 	q->page_pool = page_pool_create(&pp_params);
595 	if (IS_ERR(q->page_pool)) {
596 		int err = PTR_ERR(q->page_pool);
597 
598 		q->page_pool = NULL;
599 		return err;
600 	}
601 
602 	return 0;
603 }
604 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
605 
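/* Allocate the primary mt76_dev together with its ieee80211_hw and default
 * phy, and initialize the locks, RX skb queues, token IDRs and ordered
 * workqueue used by the core.
 */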
606 struct mt76_dev *
607 mt76_alloc_device(struct device *pdev, unsigned int size,
608 		  const struct ieee80211_ops *ops,
609 		  const struct mt76_driver_ops *drv_ops)
610 {
611 	struct ieee80211_hw *hw;
612 	struct mt76_phy *phy;
613 	struct mt76_dev *dev;
614 	int i;
615 
616 	hw = ieee80211_alloc_hw(size, ops);
617 	if (!hw)
618 		return NULL;
619 
620 	dev = hw->priv;
621 	dev->hw = hw;
622 	dev->dev = pdev;
623 	dev->drv = drv_ops;
624 	dev->dma_dev = pdev;
625 
626 	phy = &dev->phy;
627 	phy->dev = dev;
628 	phy->hw = hw;
629 	phy->band_idx = MT_BAND0;
630 	dev->phys[phy->band_idx] = phy;
631 
632 	spin_lock_init(&dev->rx_lock);
633 	spin_lock_init(&dev->lock);
634 	spin_lock_init(&dev->cc_lock);
635 	spin_lock_init(&dev->status_lock);
636 	spin_lock_init(&dev->wed_lock);
637 	mutex_init(&dev->mutex);
638 	init_waitqueue_head(&dev->tx_wait);
639 
640 	skb_queue_head_init(&dev->mcu.res_q);
641 	init_waitqueue_head(&dev->mcu.wait);
642 	mutex_init(&dev->mcu.mutex);
643 	dev->tx_worker.fn = mt76_tx_worker;
644 
645 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
646 	hw->wiphy->interface_modes =
647 		BIT(NL80211_IFTYPE_STATION) |
648 		BIT(NL80211_IFTYPE_AP) |
649 #ifdef CONFIG_MAC80211_MESH
650 		BIT(NL80211_IFTYPE_MESH_POINT) |
651 #endif
652 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
653 		BIT(NL80211_IFTYPE_P2P_GO) |
654 		BIT(NL80211_IFTYPE_ADHOC);
655 
656 	spin_lock_init(&dev->token_lock);
657 	idr_init(&dev->token);
658 
659 	spin_lock_init(&dev->rx_token_lock);
660 	idr_init(&dev->rx_token);
661 
662 	INIT_LIST_HEAD(&dev->wcid_list);
663 
664 	INIT_LIST_HEAD(&dev->txwi_cache);
665 	INIT_LIST_HEAD(&dev->rxwi_cache);
666 	dev->token_size = dev->drv->token_size;
667 
668 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
669 		skb_queue_head_init(&dev->rx_skb[i]);
670 
671 	dev->wq = alloc_ordered_workqueue("mt76", 0);
672 	if (!dev->wq) {
673 		ieee80211_free_hw(hw);
674 		return NULL;
675 	}
676 
677 	return dev;
678 }
679 EXPORT_SYMBOL_GPL(mt76_alloc_device);
680 
681 int mt76_register_device(struct mt76_dev *dev, bool vht,
682 			 struct ieee80211_rate *rates, int n_rates)
683 {
684 	struct ieee80211_hw *hw = dev->hw;
685 	struct mt76_phy *phy = &dev->phy;
686 	int ret;
687 
688 	dev_set_drvdata(dev->dev, dev);
689 	ret = mt76_phy_init(phy, hw);
690 	if (ret)
691 		return ret;
692 
693 	if (phy->cap.has_2ghz) {
694 		ret = mt76_init_sband_2g(phy, rates, n_rates);
695 		if (ret)
696 			return ret;
697 	}
698 
699 	if (phy->cap.has_5ghz) {
700 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
701 		if (ret)
702 			return ret;
703 	}
704 
705 	if (phy->cap.has_6ghz) {
706 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
707 		if (ret)
708 			return ret;
709 	}
710 
711 	wiphy_read_of_freq_limits(hw->wiphy);
712 	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
713 	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
714 	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
715 
716 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
717 		ret = mt76_led_init(phy);
718 		if (ret)
719 			return ret;
720 	}
721 
722 	ret = ieee80211_register_hw(hw);
723 	if (ret)
724 		return ret;
725 
726 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
727 	set_bit(MT76_STATE_REGISTERED, &phy->state);
728 	sched_set_fifo_low(dev->tx_worker.task);
729 
730 	return 0;
731 }
732 EXPORT_SYMBOL_GPL(mt76_register_device);
733 
734 void mt76_unregister_device(struct mt76_dev *dev)
735 {
736 	struct ieee80211_hw *hw = dev->hw;
737 
738 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
739 		return;
740 
741 	if (IS_ENABLED(CONFIG_MT76_LEDS))
742 		mt76_led_cleanup(&dev->phy);
743 	mt76_tx_status_check(dev, true);
744 	ieee80211_unregister_hw(hw);
745 }
746 EXPORT_SYMBOL_GPL(mt76_unregister_device);
747 
748 void mt76_free_device(struct mt76_dev *dev)
749 {
750 	mt76_worker_teardown(&dev->tx_worker);
751 	if (dev->wq) {
752 		destroy_workqueue(dev->wq);
753 		dev->wq = NULL;
754 	}
755 	ieee80211_free_hw(dev->hw);
756 }
757 EXPORT_SYMBOL_GPL(mt76_free_device);
758 
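/* Flush the A-MSDU currently being assembled for queue @q to the rx_skb
 * list, dropping it if the first subframe fails the spoofing check below.
 */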
759 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
760 {
761 	struct sk_buff *skb = phy->rx_amsdu[q].head;
762 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
763 	struct mt76_dev *dev = phy->dev;
764 
765 	phy->rx_amsdu[q].head = NULL;
766 	phy->rx_amsdu[q].tail = NULL;
767 
	/*
	 * Validate that the A-MSDU has a proper first subframe.
	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
	 * A-MSDU flag of the QoS header gets flipped. In such cases, the
	 * first subframe has an LLC/SNAP header in the location of the
	 * destination address.
	 */
775 	if (skb_shinfo(skb)->frag_list) {
776 		int offset = 0;
777 
778 		if (!(status->flag & RX_FLAG_8023)) {
779 			offset = ieee80211_get_hdrlen_from_skb(skb);
780 
781 			if ((status->flag &
782 			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
783 			    RX_FLAG_DECRYPTED)
784 				offset += 8;
785 		}
786 
787 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
788 			dev_kfree_skb(skb);
789 			return;
790 		}
791 	}
792 	__skb_queue_tail(&dev->rx_skb[q], skb);
793 }
794 
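/* Collect consecutive subframes of the same A-MSDU (matching sequence
 * number) into one skb frag_list before releasing them to the RX path.
 */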
795 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
796 				  struct sk_buff *skb)
797 {
798 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
799 
800 	if (phy->rx_amsdu[q].head &&
801 	    (!status->amsdu || status->first_amsdu ||
802 	     status->seqno != phy->rx_amsdu[q].seqno))
803 		mt76_rx_release_amsdu(phy, q);
804 
805 	if (!phy->rx_amsdu[q].head) {
806 		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
807 		phy->rx_amsdu[q].seqno = status->seqno;
808 		phy->rx_amsdu[q].head = skb;
809 	} else {
810 		*phy->rx_amsdu[q].tail = skb;
811 		phy->rx_amsdu[q].tail = &skb->next;
812 	}
813 
814 	if (!status->amsdu || status->last_amsdu)
815 		mt76_rx_release_amsdu(phy, q);
816 }
817 
818 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
819 {
820 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
821 	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
822 
823 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
824 		dev_kfree_skb(skb);
825 		return;
826 	}
827 
828 #ifdef CONFIG_NL80211_TESTMODE
829 	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
830 		phy->test.rx_stats.packets[q]++;
831 		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
832 			phy->test.rx_stats.fcs_error[q]++;
833 	}
834 #endif
835 
836 	mt76_rx_release_burst(phy, q, skb);
837 }
838 EXPORT_SYMBOL_GPL(mt76_rx);
839 
840 bool mt76_has_tx_pending(struct mt76_phy *phy)
841 {
842 	struct mt76_queue *q;
843 	int i;
844 
845 	for (i = 0; i < __MT_TXQ_MAX; i++) {
846 		q = phy->q_tx[i];
847 		if (q && q->queued)
848 			return true;
849 	}
850 
851 	return false;
852 }
853 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
854 
855 static struct mt76_channel_state *
856 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
857 {
858 	struct mt76_sband *msband;
859 	int idx;
860 
861 	if (c->band == NL80211_BAND_2GHZ)
862 		msband = &phy->sband_2g;
863 	else if (c->band == NL80211_BAND_6GHZ)
864 		msband = &phy->sband_6g;
865 	else
866 		msband = &phy->sband_5g;
867 
868 	idx = c - &msband->sband.channels[0];
869 	return &msband->chan[idx];
870 }
871 
872 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
873 {
874 	struct mt76_channel_state *state = phy->chan_state;
875 
876 	state->cc_active += ktime_to_us(ktime_sub(time,
877 						  phy->survey_time));
878 	phy->survey_time = time;
879 }
880 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
881 
882 void mt76_update_survey(struct mt76_phy *phy)
883 {
884 	struct mt76_dev *dev = phy->dev;
885 	ktime_t cur_time;
886 
887 	if (dev->drv->update_survey)
888 		dev->drv->update_survey(phy);
889 
890 	cur_time = ktime_get_boottime();
891 	mt76_update_survey_active_time(phy, cur_time);
892 
893 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
894 		struct mt76_channel_state *state = phy->chan_state;
895 
896 		spin_lock_bh(&dev->cc_lock);
897 		state->cc_bss_rx += dev->cur_cc_bss_rx;
898 		dev->cur_cc_bss_rx = 0;
899 		spin_unlock_bh(&dev->cc_lock);
900 	}
901 }
902 EXPORT_SYMBOL_GPL(mt76_update_survey);
903 
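/* Switch the phy to the channel configured in hw->conf: wait (with timeout)
 * for pending TX to drain, update the survey counters and reset the DFS and
 * off-channel related state as needed.
 */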
904 void mt76_set_channel(struct mt76_phy *phy)
905 {
906 	struct mt76_dev *dev = phy->dev;
907 	struct ieee80211_hw *hw = phy->hw;
908 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
909 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
910 	int timeout = HZ / 5;
911 
912 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
913 	mt76_update_survey(phy);
914 
915 	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
916 	    phy->chandef.width != chandef->width)
917 		phy->dfs_state = MT_DFS_STATE_UNKNOWN;
918 
919 	phy->chandef = *chandef;
920 	phy->chan_state = mt76_channel_state(phy, chandef->chan);
921 
922 	if (!offchannel)
923 		phy->main_chan = chandef->chan;
924 
925 	if (chandef->chan != phy->main_chan)
926 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
927 }
928 EXPORT_SYMBOL_GPL(mt76_set_channel);
929 
930 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
931 		    struct survey_info *survey)
932 {
933 	struct mt76_phy *phy = hw->priv;
934 	struct mt76_dev *dev = phy->dev;
935 	struct mt76_sband *sband;
936 	struct ieee80211_channel *chan;
937 	struct mt76_channel_state *state;
938 	int ret = 0;
939 
940 	mutex_lock(&dev->mutex);
941 	if (idx == 0 && dev->drv->update_survey)
942 		mt76_update_survey(phy);
943 
944 	if (idx >= phy->sband_2g.sband.n_channels +
945 		   phy->sband_5g.sband.n_channels) {
946 		idx -= (phy->sband_2g.sband.n_channels +
947 			phy->sband_5g.sband.n_channels);
948 		sband = &phy->sband_6g;
949 	} else if (idx >= phy->sband_2g.sband.n_channels) {
950 		idx -= phy->sband_2g.sband.n_channels;
951 		sband = &phy->sband_5g;
952 	} else {
953 		sband = &phy->sband_2g;
954 	}
955 
956 	if (idx >= sband->sband.n_channels) {
957 		ret = -ENOENT;
958 		goto out;
959 	}
960 
961 	chan = &sband->sband.channels[idx];
962 	state = mt76_channel_state(phy, chan);
963 
964 	memset(survey, 0, sizeof(*survey));
965 	survey->channel = chan;
966 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
967 	survey->filled |= dev->drv->survey_flags;
968 	if (state->noise)
969 		survey->filled |= SURVEY_INFO_NOISE_DBM;
970 
971 	if (chan == phy->main_chan) {
972 		survey->filled |= SURVEY_INFO_IN_USE;
973 
974 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
975 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
976 	}
977 
978 	survey->time_busy = div_u64(state->cc_busy, 1000);
979 	survey->time_rx = div_u64(state->cc_rx, 1000);
980 	survey->time = div_u64(state->cc_active, 1000);
981 	survey->noise = state->noise;
982 
983 	spin_lock_bh(&dev->cc_lock);
984 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
985 	survey->time_tx = div_u64(state->cc_tx, 1000);
986 	spin_unlock_bh(&dev->cc_lock);
987 
988 out:
989 	mutex_unlock(&dev->mutex);
990 
991 	return ret;
992 }
993 EXPORT_SYMBOL_GPL(mt76_get_survey);
994 
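/* Snapshot the RX packet numbers of a CCMP key (one per TID plus the robust
 * management frame counter) so that software replay checking can resume from
 * them.
 */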
995 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
996 			 struct ieee80211_key_conf *key)
997 {
998 	struct ieee80211_key_seq seq;
999 	int i;
1000 
1001 	wcid->rx_check_pn = false;
1002 
1003 	if (!key)
1004 		return;
1005 
1006 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1007 		return;
1008 
1009 	wcid->rx_check_pn = true;
1010 
1011 	/* data frame */
1012 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1013 		ieee80211_get_key_rx_seq(key, i, &seq);
1014 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1015 	}
1016 
1017 	/* robust management frame */
1018 	ieee80211_get_key_rx_seq(key, -1, &seq);
1019 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1021 }
1022 EXPORT_SYMBOL(mt76_wcid_key_setup);
1023 
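/* Combine per-chain signal values into a single RSSI estimate: start from
 * the strongest chain and add up to 3 dB depending on how close the other
 * chains are.
 */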
1024 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1025 {
1026 	int signal = -128;
1027 	u8 chains;
1028 
1029 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1030 		int cur, diff;
1031 
1032 		cur = *chain_signal;
1033 		if (!(chains & BIT(0)) ||
1034 		    cur > 0)
1035 			continue;
1036 
1037 		if (cur > signal)
1038 			swap(cur, signal);
1039 
1040 		diff = signal - cur;
1041 		if (diff == 0)
1042 			signal += 3;
1043 		else if (diff <= 2)
1044 			signal += 2;
1045 		else if (diff <= 6)
1046 			signal += 1;
1047 	}
1048 
1049 	return signal;
1050 }
1051 EXPORT_SYMBOL(mt76_rx_signal);
1052 
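/* Convert the driver specific mt76_rx_status stored in skb->cb into the
 * mac80211 ieee80211_rx_status and resolve the destination hw and station.
 */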
1053 static void
1054 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1055 		struct ieee80211_hw **hw,
1056 		struct ieee80211_sta **sta)
1057 {
1058 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1059 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1060 	struct mt76_rx_status mstat;
1061 
1062 	mstat = *((struct mt76_rx_status *)skb->cb);
1063 	memset(status, 0, sizeof(*status));
1064 
1065 	status->flag = mstat.flag;
1066 	status->freq = mstat.freq;
1067 	status->enc_flags = mstat.enc_flags;
1068 	status->encoding = mstat.encoding;
1069 	status->bw = mstat.bw;
1070 	if (status->encoding == RX_ENC_EHT) {
1071 		status->eht.ru = mstat.eht.ru;
1072 		status->eht.gi = mstat.eht.gi;
1073 	} else {
1074 		status->he_ru = mstat.he_ru;
1075 		status->he_gi = mstat.he_gi;
1076 		status->he_dcm = mstat.he_dcm;
1077 	}
1078 	status->rate_idx = mstat.rate_idx;
1079 	status->nss = mstat.nss;
1080 	status->band = mstat.band;
1081 	status->signal = mstat.signal;
1082 	status->chains = mstat.chains;
1083 	status->ampdu_reference = mstat.ampdu_ref;
1084 	status->device_timestamp = mstat.timestamp;
1085 	status->mactime = mstat.timestamp;
1086 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1087 	if (status->signal <= -128)
1088 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1089 
1090 	if (ieee80211_is_beacon(hdr->frame_control) ||
1091 	    ieee80211_is_probe_resp(hdr->frame_control))
1092 		status->boottime_ns = ktime_get_boottime_ns();
1093 
1094 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1095 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1096 		     sizeof(mstat.chain_signal));
1097 	memcpy(status->chain_signal, mstat.chain_signal,
1098 	       sizeof(mstat.chain_signal));
1099 
1100 	*sta = wcid_to_sta(mstat.wcid);
1101 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1102 }
1103 
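/* Software CCMP replay detection for decrypted frames: frames whose PN does
 * not strictly increase are flagged as monitor-only.
 */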
1104 static void
1105 mt76_check_ccmp_pn(struct sk_buff *skb)
1106 {
1107 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1108 	struct mt76_wcid *wcid = status->wcid;
1109 	struct ieee80211_hdr *hdr;
1110 	int security_idx;
1111 	int ret;
1112 
1113 	if (!(status->flag & RX_FLAG_DECRYPTED))
1114 		return;
1115 
1116 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1117 		return;
1118 
1119 	if (!wcid || !wcid->rx_check_pn)
1120 		return;
1121 
1122 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1123 	if (status->flag & RX_FLAG_8023)
1124 		goto skip_hdr_check;
1125 
1126 	hdr = mt76_skb_get_hdr(skb);
1127 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
1132 		if (ieee80211_is_frag(hdr) &&
1133 		    !ieee80211_is_first_frag(hdr->frame_control))
1134 			return;
1135 	}
1136 
1137 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1138 	 *
1139 	 * the recipient shall maintain a single replay counter for received
1140 	 * individually addressed robust Management frames that are received
1141 	 * with the To DS subfield equal to 0, [...]
1142 	 */
1143 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1144 	    !ieee80211_has_tods(hdr->frame_control))
1145 		security_idx = IEEE80211_NUM_TIDS;
1146 
1147 skip_hdr_check:
1148 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
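	/* accept the frame only if its PN is strictly larger than the last
	 * one stored for this TID / management frame counter
	 */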
1149 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1150 		     sizeof(status->iv));
1151 	if (ret <= 0) {
1152 		status->flag |= RX_FLAG_ONLY_MONITOR;
1153 		return;
1154 	}
1155 
1156 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1157 
1158 	if (status->flag & RX_FLAG_IV_STRIPPED)
1159 		status->flag |= RX_FLAG_PN_VALIDATED;
1160 }
1161 
1162 static void
1163 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1164 		    int len)
1165 {
1166 	struct mt76_wcid *wcid = status->wcid;
1167 	struct ieee80211_rx_status info = {
1168 		.enc_flags = status->enc_flags,
1169 		.rate_idx = status->rate_idx,
1170 		.encoding = status->encoding,
1171 		.band = status->band,
1172 		.nss = status->nss,
1173 		.bw = status->bw,
1174 	};
1175 	struct ieee80211_sta *sta;
1176 	u32 airtime;
1177 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1178 
1179 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1180 	spin_lock(&dev->cc_lock);
1181 	dev->cur_cc_bss_rx += airtime;
1182 	spin_unlock(&dev->cc_lock);
1183 
1184 	if (!wcid || !wcid->sta)
1185 		return;
1186 
1187 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1188 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1189 }
1190 
1191 static void
1192 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1193 {
1194 	struct mt76_wcid *wcid;
1195 	int wcid_idx;
1196 
1197 	if (!dev->rx_ampdu_len)
1198 		return;
1199 
1200 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1201 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1202 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1203 	else
1204 		wcid = NULL;
1205 	dev->rx_ampdu_status.wcid = wcid;
1206 
1207 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1208 
1209 	dev->rx_ampdu_len = 0;
1210 	dev->rx_ampdu_ref = 0;
1211 }
1212 
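/* Software RX airtime accounting (MT_DRV_SW_RX_AIRTIME): subframes of an
 * A-MPDU are accumulated and reported once per aggregate, everything else is
 * reported directly.
 */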
1213 static void
1214 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1215 {
1216 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1217 	struct mt76_wcid *wcid = status->wcid;
1218 
1219 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1220 		return;
1221 
1222 	if (!wcid || !wcid->sta) {
1223 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1224 
1225 		if (status->flag & RX_FLAG_8023)
1226 			return;
1227 
1228 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1229 			return;
1230 
1231 		wcid = NULL;
1232 	}
1233 
1234 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1235 	    status->ampdu_ref != dev->rx_ampdu_ref)
1236 		mt76_airtime_flush_ampdu(dev);
1237 
1238 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1239 		if (!dev->rx_ampdu_len ||
1240 		    status->ampdu_ref != dev->rx_ampdu_ref) {
1241 			dev->rx_ampdu_status = *status;
1242 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1243 			dev->rx_ampdu_ref = status->ampdu_ref;
1244 		}
1245 
1246 		dev->rx_ampdu_len += skb->len;
1247 		return;
1248 	}
1249 
1250 	mt76_airtime_report(dev, status, skb->len);
1251 }
1252 
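/* Per-frame RX bookkeeping for the transmitting station: RSSI averaging,
 * airtime accounting and powersave / U-APSD state transitions.
 */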
1253 static void
1254 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1255 {
1256 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1257 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1258 	struct ieee80211_sta *sta;
1259 	struct ieee80211_hw *hw;
1260 	struct mt76_wcid *wcid = status->wcid;
1261 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1262 	bool ps;
1263 
1264 	hw = mt76_phy_hw(dev, status->phy_idx);
1265 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1266 	    !(status->flag & RX_FLAG_8023)) {
1267 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1268 		if (sta)
1269 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1270 	}
1271 
1272 	mt76_airtime_check(dev, skb);
1273 
1274 	if (!wcid || !wcid->sta)
1275 		return;
1276 
1277 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1278 
1279 	if (status->signal <= 0)
1280 		ewma_signal_add(&wcid->rssi, -status->signal);
1281 
1282 	wcid->inactive_count = 0;
1283 
1284 	if (status->flag & RX_FLAG_8023)
1285 		return;
1286 
1287 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1288 		return;
1289 
1290 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1291 		ieee80211_sta_pspoll(sta);
1292 		return;
1293 	}
1294 
1295 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1296 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1297 	      ieee80211_is_data(hdr->frame_control)))
1298 		return;
1299 
1300 	ps = ieee80211_has_pm(hdr->frame_control);
1301 
1302 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1303 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1304 		ieee80211_sta_uapsd_trigger(sta, tidno);
1305 
1306 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1307 		return;
1308 
1309 	if (ps)
1310 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1311 
1312 	if (dev->drv->sta_ps)
1313 		dev->drv->sta_ps(dev, sta, ps);
1314 
1315 	if (!ps)
1316 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1317 
1318 	ieee80211_sta_ps_transition(sta, ps);
1319 }
1320 
1321 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1322 		      struct napi_struct *napi)
1323 {
1324 	struct ieee80211_sta *sta;
1325 	struct ieee80211_hw *hw;
1326 	struct sk_buff *skb, *tmp;
1327 	LIST_HEAD(list);
1328 
1329 	spin_lock(&dev->rx_lock);
1330 	while ((skb = __skb_dequeue(frames)) != NULL) {
1331 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1332 
1333 		mt76_check_ccmp_pn(skb);
1334 		skb_shinfo(skb)->frag_list = NULL;
1335 		mt76_rx_convert(dev, skb, &hw, &sta);
1336 		ieee80211_rx_list(hw, sta, skb, &list);
1337 
		/* subsequent A-MSDU subframes */
1339 		while (nskb) {
1340 			skb = nskb;
1341 			nskb = nskb->next;
1342 			skb->next = NULL;
1343 
1344 			mt76_rx_convert(dev, skb, &hw, &sta);
1345 			ieee80211_rx_list(hw, sta, skb, &list);
1346 		}
1347 	}
1348 	spin_unlock(&dev->rx_lock);
1349 
1350 	if (!napi) {
1351 		netif_receive_skb_list(&list);
1352 		return;
1353 	}
1354 
1355 	list_for_each_entry_safe(skb, tmp, &list, list) {
1356 		skb_list_del_init(skb);
1357 		napi_gro_receive(napi, skb);
1358 	}
1359 }
1360 
1361 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1362 			   struct napi_struct *napi)
1363 {
1364 	struct sk_buff_head frames;
1365 	struct sk_buff *skb;
1366 
1367 	__skb_queue_head_init(&frames);
1368 
1369 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1370 		mt76_check_sta(dev, skb);
1371 		if (mtk_wed_device_active(&dev->mmio.wed))
1372 			__skb_queue_tail(&frames, skb);
1373 		else
1374 			mt76_rx_aggr_reorder(skb, &frames);
1375 	}
1376 
1377 	mt76_rx_complete(dev, &frames, napi);
1378 }
1379 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1380 
1381 static int
1382 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1383 	     struct ieee80211_sta *sta)
1384 {
1385 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1386 	struct mt76_dev *dev = phy->dev;
1387 	int ret;
1388 	int i;
1389 
1390 	mutex_lock(&dev->mutex);
1391 
1392 	ret = dev->drv->sta_add(dev, vif, sta);
1393 	if (ret)
1394 		goto out;
1395 
1396 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1397 		struct mt76_txq *mtxq;
1398 
1399 		if (!sta->txq[i])
1400 			continue;
1401 
1402 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1403 		mtxq->wcid = wcid->idx;
1404 	}
1405 
1406 	ewma_signal_init(&wcid->rssi);
1407 	if (phy->band_idx == MT_BAND1)
1408 		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1409 	wcid->phy_idx = phy->band_idx;
1410 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1411 
1412 	mt76_packet_id_init(wcid);
1413 out:
1414 	mutex_unlock(&dev->mutex);
1415 
1416 	return ret;
1417 }
1418 
1419 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1420 		       struct ieee80211_sta *sta)
1421 {
1422 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1423 	int i, idx = wcid->idx;
1424 
1425 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1426 		mt76_rx_aggr_stop(dev, wcid, i);
1427 
1428 	if (dev->drv->sta_remove)
1429 		dev->drv->sta_remove(dev, vif, sta);
1430 
1431 	mt76_packet_id_flush(dev, wcid);
1432 
1433 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1434 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1435 }
1436 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1437 
1438 static void
1439 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1440 		struct ieee80211_sta *sta)
1441 {
1442 	mutex_lock(&dev->mutex);
1443 	__mt76_sta_remove(dev, vif, sta);
1444 	mutex_unlock(&dev->mutex);
1445 }
1446 
1447 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1448 		   struct ieee80211_sta *sta,
1449 		   enum ieee80211_sta_state old_state,
1450 		   enum ieee80211_sta_state new_state)
1451 {
1452 	struct mt76_phy *phy = hw->priv;
1453 	struct mt76_dev *dev = phy->dev;
1454 
1455 	if (old_state == IEEE80211_STA_NOTEXIST &&
1456 	    new_state == IEEE80211_STA_NONE)
1457 		return mt76_sta_add(phy, vif, sta);
1458 
1459 	if (old_state == IEEE80211_STA_AUTH &&
1460 	    new_state == IEEE80211_STA_ASSOC &&
1461 	    dev->drv->sta_assoc)
1462 		dev->drv->sta_assoc(dev, vif, sta);
1463 
1464 	if (old_state == IEEE80211_STA_NONE &&
1465 	    new_state == IEEE80211_STA_NOTEXIST)
1466 		mt76_sta_remove(dev, vif, sta);
1467 
1468 	return 0;
1469 }
1470 EXPORT_SYMBOL_GPL(mt76_sta_state);
1471 
1472 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1473 			     struct ieee80211_sta *sta)
1474 {
1475 	struct mt76_phy *phy = hw->priv;
1476 	struct mt76_dev *dev = phy->dev;
1477 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1478 
1479 	mutex_lock(&dev->mutex);
1480 	spin_lock_bh(&dev->status_lock);
1481 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1482 	spin_unlock_bh(&dev->status_lock);
1483 	mutex_unlock(&dev->mutex);
1484 }
1485 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1486 
1487 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1488 		     int *dbm)
1489 {
1490 	struct mt76_phy *phy = hw->priv;
1491 	int n_chains = hweight8(phy->antenna_mask);
1492 	int delta = mt76_tx_power_nss_delta(n_chains);
1493 
1494 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1495 
1496 	return 0;
1497 }
1498 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1499 
1500 int mt76_init_sar_power(struct ieee80211_hw *hw,
1501 			const struct cfg80211_sar_specs *sar)
1502 {
1503 	struct mt76_phy *phy = hw->priv;
1504 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1505 	int i;
1506 
1507 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1508 		return -EINVAL;
1509 
1510 	for (i = 0; i < sar->num_sub_specs; i++) {
1511 		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in 0.25 dBm steps */
1513 		s32 power = sar->sub_specs[i].power >> 1;
1514 
1515 		if (power > 127 || power < -127)
1516 			power = 127;
1517 
1518 		phy->frp[index].range = &capa->freq_ranges[index];
1519 		phy->frp[index].power = power;
1520 	}
1521 
1522 	return 0;
1523 }
1524 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1525 
1526 int mt76_get_sar_power(struct mt76_phy *phy,
1527 		       struct ieee80211_channel *chan,
1528 		       int power)
1529 {
1530 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1531 	int freq, i;
1532 
1533 	if (!capa || !phy->frp)
1534 		return power;
1535 
1536 	if (power > 127 || power < -127)
1537 		power = 127;
1538 
1539 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1540 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1541 		if (phy->frp[i].range &&
1542 		    freq >= phy->frp[i].range->start_freq &&
1543 		    freq < phy->frp[i].range->end_freq) {
1544 			power = min_t(int, phy->frp[i].power, power);
1545 			break;
1546 		}
1547 	}
1548 
1549 	return power;
1550 }
1551 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1552 
1553 static void
1554 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1555 {
1556 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1557 		ieee80211_csa_finish(vif);
1558 }
1559 
1560 void mt76_csa_finish(struct mt76_dev *dev)
1561 {
1562 	if (!dev->csa_complete)
1563 		return;
1564 
1565 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1566 		IEEE80211_IFACE_ITER_RESUME_ALL,
1567 		__mt76_csa_finish, dev);
1568 
1569 	dev->csa_complete = 0;
1570 }
1571 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1572 
1573 static void
1574 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1575 {
1576 	struct mt76_dev *dev = priv;
1577 
1578 	if (!vif->bss_conf.csa_active)
1579 		return;
1580 
1581 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1582 }
1583 
1584 void mt76_csa_check(struct mt76_dev *dev)
1585 {
1586 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1587 		IEEE80211_IFACE_ITER_RESUME_ALL,
1588 		__mt76_csa_check, dev);
1589 }
1590 EXPORT_SYMBOL_GPL(mt76_csa_check);
1591 
1592 int
1593 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1594 {
1595 	return 0;
1596 }
1597 EXPORT_SYMBOL_GPL(mt76_set_tim);
1598 
1599 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1600 {
1601 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1602 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1603 	u8 *hdr, *pn = status->iv;
1604 
1605 	__skb_push(skb, 8);
1606 	memmove(skb->data, skb->data + 8, hdr_len);
1607 	hdr = skb->data + hdr_len;
1608 
1609 	hdr[0] = pn[5];
1610 	hdr[1] = pn[4];
1611 	hdr[2] = 0;
1612 	hdr[3] = 0x20 | (key_id << 6);
1613 	hdr[4] = pn[3];
1614 	hdr[5] = pn[2];
1615 	hdr[6] = pn[1];
1616 	hdr[7] = pn[0];
1617 
1618 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1619 }
1620 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1621 
1622 int mt76_get_rate(struct mt76_dev *dev,
1623 		  struct ieee80211_supported_band *sband,
1624 		  int idx, bool cck)
1625 {
1626 	int i, offset = 0, len = sband->n_bitrates;
1627 
1628 	if (cck) {
1629 		if (sband != &dev->phy.sband_2g.sband)
1630 			return 0;
1631 
1632 		idx &= ~BIT(2); /* short preamble */
1633 	} else if (sband == &dev->phy.sband_2g.sband) {
1634 		offset = 4;
1635 	}
1636 
1637 	for (i = offset; i < len; i++) {
1638 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1639 			return i;
1640 	}
1641 
1642 	return 0;
1643 }
1644 EXPORT_SYMBOL_GPL(mt76_get_rate);
1645 
1646 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1647 		  const u8 *mac)
1648 {
1649 	struct mt76_phy *phy = hw->priv;
1650 
1651 	set_bit(MT76_SCANNING, &phy->state);
1652 }
1653 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1654 
1655 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1656 {
1657 	struct mt76_phy *phy = hw->priv;
1658 
1659 	clear_bit(MT76_SCANNING, &phy->state);
1660 }
1661 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1662 
1663 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1664 {
1665 	struct mt76_phy *phy = hw->priv;
1666 	struct mt76_dev *dev = phy->dev;
1667 
1668 	mutex_lock(&dev->mutex);
1669 	*tx_ant = phy->antenna_mask;
1670 	*rx_ant = phy->antenna_mask;
1671 	mutex_unlock(&dev->mutex);
1672 
1673 	return 0;
1674 }
1675 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1676 
1677 struct mt76_queue *
1678 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1679 		int ring_base, u32 flags)
1680 {
1681 	struct mt76_queue *hwq;
1682 	int err;
1683 
1684 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1685 	if (!hwq)
1686 		return ERR_PTR(-ENOMEM);
1687 
1688 	hwq->flags = flags;
1689 
1690 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1691 	if (err < 0)
1692 		return ERR_PTR(err);
1693 
1694 	return hwq;
1695 }
1696 EXPORT_SYMBOL_GPL(mt76_init_queue);
1697 
1698 u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
1699 {
1700 	int offset = 0;
1701 
1702 	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
1703 		offset = 4;
1704 
1705 	/* pick the lowest rate for hidden nodes */
1706 	if (rateidx < 0)
1707 		rateidx = 0;
1708 
1709 	rateidx += offset;
1710 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1711 		rateidx = offset;
1712 
1713 	return mt76_rates[rateidx].hw_value;
1714 }
1715 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1716 
1717 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1718 			 struct mt76_sta_stats *stats, bool eht)
1719 {
1720 	int i, ei = wi->initial_stat_idx;
1721 	u64 *data = wi->data;
1722 
1723 	wi->sta_count++;
1724 
1725 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1726 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1727 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1728 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1729 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1730 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1731 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1732 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1733 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1734 	if (eht) {
1735 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1736 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1737 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1738 	}
1739 
1740 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
1741 		data[ei++] += stats->tx_bw[i];
1742 
1743 	for (i = 0; i < (eht ? 14 : 12); i++)
1744 		data[ei++] += stats->tx_mcs[i];
1745 
1746 	wi->worker_stat_count = ei - wi->initial_stat_idx;
1747 }
1748 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1749 
1750 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
1751 {
1752 #ifdef CONFIG_PAGE_POOL_STATS
1753 	struct page_pool_stats stats = {};
1754 	int i;
1755 
1756 	mt76_for_each_q_rx(dev, i)
1757 		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
1758 
1759 	page_pool_ethtool_stats_get(data, &stats);
1760 	*index += page_pool_ethtool_stats_get_count();
1761 #endif
1762 }
1763 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1764 
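/* Derive the DFS state for the current chandef (disabled, CAC or active)
 * from the regulatory region, scan state and hw radar detection
 * configuration.
 */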
1765 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1766 {
1767 	struct ieee80211_hw *hw = phy->hw;
1768 	struct mt76_dev *dev = phy->dev;
1769 
1770 	if (dev->region == NL80211_DFS_UNSET ||
1771 	    test_bit(MT76_SCANNING, &phy->state))
1772 		return MT_DFS_STATE_DISABLED;
1773 
1774 	if (!hw->conf.radar_enabled) {
1775 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1776 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1777 			return MT_DFS_STATE_ACTIVE;
1778 
1779 		return MT_DFS_STATE_DISABLED;
1780 	}
1781 
1782 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1783 		return MT_DFS_STATE_CAC;
1784 
1785 	return MT_DFS_STATE_ACTIVE;
1786 }
1787 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1788