1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include <net/page_pool.h>
8 #include "mt76.h"
9 
10 #define CHAN2G(_idx, _freq) {			\
11 	.band = NL80211_BAND_2GHZ,		\
12 	.center_freq = (_freq),			\
13 	.hw_value = (_idx),			\
14 	.max_power = 30,			\
15 }
16 
17 #define CHAN5G(_idx, _freq) {			\
18 	.band = NL80211_BAND_5GHZ,		\
19 	.center_freq = (_freq),			\
20 	.hw_value = (_idx),			\
21 	.max_power = 30,			\
22 }
23 
24 #define CHAN6G(_idx, _freq) {			\
25 	.band = NL80211_BAND_6GHZ,		\
26 	.center_freq = (_freq),			\
27 	.hw_value = (_idx),			\
28 	.max_power = 30,			\
29 }
30 
31 static const struct ieee80211_channel mt76_channels_2ghz[] = {
32 	CHAN2G(1, 2412),
33 	CHAN2G(2, 2417),
34 	CHAN2G(3, 2422),
35 	CHAN2G(4, 2427),
36 	CHAN2G(5, 2432),
37 	CHAN2G(6, 2437),
38 	CHAN2G(7, 2442),
39 	CHAN2G(8, 2447),
40 	CHAN2G(9, 2452),
41 	CHAN2G(10, 2457),
42 	CHAN2G(11, 2462),
43 	CHAN2G(12, 2467),
44 	CHAN2G(13, 2472),
45 	CHAN2G(14, 2484),
46 };
47 
48 static const struct ieee80211_channel mt76_channels_5ghz[] = {
49 	CHAN5G(36, 5180),
50 	CHAN5G(40, 5200),
51 	CHAN5G(44, 5220),
52 	CHAN5G(48, 5240),
53 
54 	CHAN5G(52, 5260),
55 	CHAN5G(56, 5280),
56 	CHAN5G(60, 5300),
57 	CHAN5G(64, 5320),
58 
59 	CHAN5G(100, 5500),
60 	CHAN5G(104, 5520),
61 	CHAN5G(108, 5540),
62 	CHAN5G(112, 5560),
63 	CHAN5G(116, 5580),
64 	CHAN5G(120, 5600),
65 	CHAN5G(124, 5620),
66 	CHAN5G(128, 5640),
67 	CHAN5G(132, 5660),
68 	CHAN5G(136, 5680),
69 	CHAN5G(140, 5700),
70 	CHAN5G(144, 5720),
71 
72 	CHAN5G(149, 5745),
73 	CHAN5G(153, 5765),
74 	CHAN5G(157, 5785),
75 	CHAN5G(161, 5805),
76 	CHAN5G(165, 5825),
77 	CHAN5G(169, 5845),
78 	CHAN5G(173, 5865),
79 };
80 
81 static const struct ieee80211_channel mt76_channels_6ghz[] = {
82 	/* UNII-5 */
83 	CHAN6G(1, 5955),
84 	CHAN6G(5, 5975),
85 	CHAN6G(9, 5995),
86 	CHAN6G(13, 6015),
87 	CHAN6G(17, 6035),
88 	CHAN6G(21, 6055),
89 	CHAN6G(25, 6075),
90 	CHAN6G(29, 6095),
91 	CHAN6G(33, 6115),
92 	CHAN6G(37, 6135),
93 	CHAN6G(41, 6155),
94 	CHAN6G(45, 6175),
95 	CHAN6G(49, 6195),
96 	CHAN6G(53, 6215),
97 	CHAN6G(57, 6235),
98 	CHAN6G(61, 6255),
99 	CHAN6G(65, 6275),
100 	CHAN6G(69, 6295),
101 	CHAN6G(73, 6315),
102 	CHAN6G(77, 6335),
103 	CHAN6G(81, 6355),
104 	CHAN6G(85, 6375),
105 	CHAN6G(89, 6395),
106 	CHAN6G(93, 6415),
107 	/* UNII-6 */
108 	CHAN6G(97, 6435),
109 	CHAN6G(101, 6455),
110 	CHAN6G(105, 6475),
111 	CHAN6G(109, 6495),
112 	CHAN6G(113, 6515),
113 	CHAN6G(117, 6535),
114 	/* UNII-7 */
115 	CHAN6G(121, 6555),
116 	CHAN6G(125, 6575),
117 	CHAN6G(129, 6595),
118 	CHAN6G(133, 6615),
119 	CHAN6G(137, 6635),
120 	CHAN6G(141, 6655),
121 	CHAN6G(145, 6675),
122 	CHAN6G(149, 6695),
123 	CHAN6G(153, 6715),
124 	CHAN6G(157, 6735),
125 	CHAN6G(161, 6755),
126 	CHAN6G(165, 6775),
127 	CHAN6G(169, 6795),
128 	CHAN6G(173, 6815),
129 	CHAN6G(177, 6835),
130 	CHAN6G(181, 6855),
131 	CHAN6G(185, 6875),
132 	/* UNII-8 */
133 	CHAN6G(189, 6895),
134 	CHAN6G(193, 6915),
135 	CHAN6G(197, 6935),
136 	CHAN6G(201, 6955),
137 	CHAN6G(205, 6975),
138 	CHAN6G(209, 6995),
139 	CHAN6G(213, 7015),
140 	CHAN6G(217, 7035),
141 	CHAN6G(221, 7055),
142 	CHAN6G(225, 7075),
143 	CHAN6G(229, 7095),
144 	CHAN6G(233, 7115),
145 };
146 
147 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
148 	{ .throughput =   0 * 1024, .blink_time = 334 },
149 	{ .throughput =   1 * 1024, .blink_time = 260 },
150 	{ .throughput =   5 * 1024, .blink_time = 220 },
151 	{ .throughput =  10 * 1024, .blink_time = 190 },
152 	{ .throughput =  20 * 1024, .blink_time = 170 },
153 	{ .throughput =  50 * 1024, .blink_time = 150 },
154 	{ .throughput =  70 * 1024, .blink_time = 130 },
155 	{ .throughput = 100 * 1024, .blink_time = 110 },
156 	{ .throughput = 200 * 1024, .blink_time =  80 },
157 	{ .throughput = 300 * 1024, .blink_time =  50 },
158 };
159 
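/* Legacy rate table shared by the drivers: the first macro argument is the
 * hardware rate index (encoded into hw_value together with the PHY type by
 * CCK_RATE/OFDM_RATE in mt76.h), the second is the bitrate in units of
 * 100 kbit/s (CCK 1-11 Mbit/s, OFDM 6-54 Mbit/s).
 */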
160 struct ieee80211_rate mt76_rates[] = {
161 	CCK_RATE(0, 10),
162 	CCK_RATE(1, 20),
163 	CCK_RATE(2, 55),
164 	CCK_RATE(3, 110),
165 	OFDM_RATE(11, 60),
166 	OFDM_RATE(15, 90),
167 	OFDM_RATE(10, 120),
168 	OFDM_RATE(14, 180),
169 	OFDM_RATE(9,  240),
170 	OFDM_RATE(13, 360),
171 	OFDM_RATE(8,  480),
172 	OFDM_RATE(12, 540),
173 };
174 EXPORT_SYMBOL_GPL(mt76_rates);
175 
176 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
177 	{ .start_freq = 2402, .end_freq = 2494, },
178 	{ .start_freq = 5150, .end_freq = 5350, },
179 	{ .start_freq = 5350, .end_freq = 5470, },
180 	{ .start_freq = 5470, .end_freq = 5725, },
181 	{ .start_freq = 5725, .end_freq = 5950, },
182 	{ .start_freq = 5945, .end_freq = 6165, },
183 	{ .start_freq = 6165, .end_freq = 6405, },
184 	{ .start_freq = 6405, .end_freq = 6525, },
185 	{ .start_freq = 6525, .end_freq = 6705, },
186 	{ .start_freq = 6705, .end_freq = 6865, },
187 	{ .start_freq = 6865, .end_freq = 7125, },
188 };
189 
190 static const struct cfg80211_sar_capa mt76_sar_capa = {
191 	.type = NL80211_SAR_TYPE_POWER,
192 	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
193 	.freq_ranges = &mt76_sar_freq_ranges[0],
194 };
195 
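/* Register an LED class device for the phy. Drivers opt in by providing
 * brightness_set/blink_set callbacks; the LED is named "mt76-<wiphy>" and
 * uses a throughput based blink trigger. For the primary phy, the LED pin
 * and polarity can be overridden via an optional "led" OF child node
 * ("led-sources", "led-active-low").
 */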
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 
201 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
202 		return 0;
203 
204 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
205 		 wiphy_name(hw->wiphy));
206 
207 	phy->leds.cdev.name = phy->leds.name;
208 	phy->leds.cdev.default_trigger =
209 		ieee80211_create_tpt_led_trigger(hw,
210 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
211 					mt76_tpt_blink,
212 					ARRAY_SIZE(mt76_tpt_blink));
213 
214 	if (phy == &dev->phy) {
215 		struct device_node *np = dev->dev->of_node;
216 
217 		np = of_get_child_by_name(np, "led");
218 		if (np) {
219 			int led_pin;
220 
221 			if (!of_property_read_u32(np, "led-sources", &led_pin))
222 				phy->leds.pin = led_pin;
223 			phy->leds.al = of_property_read_bool(np,
224 							     "led-active-low");
225 			of_node_put(np);
226 		}
227 	}
228 
229 	return led_classdev_register(dev->dev, &phy->leds.cdev);
230 }
231 
232 static void mt76_led_cleanup(struct mt76_phy *phy)
233 {
234 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
235 		return;
236 
237 	led_classdev_unregister(&phy->leds.cdev);
238 }
239 
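/* Derive HT/VHT spatial stream capabilities from the antenna mask: one
 * stream per enabled antenna, TX STBC only with more than one chain, and a
 * VHT MCS 0-9 map entry per supported stream.
 */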
240 static void mt76_init_stream_cap(struct mt76_phy *phy,
241 				 struct ieee80211_supported_band *sband,
242 				 bool vht)
243 {
244 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
245 	int i, nstream = hweight8(phy->antenna_mask);
246 	struct ieee80211_sta_vht_cap *vht_cap;
247 	u16 mcs_map = 0;
248 
249 	if (nstream > 1)
250 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
251 	else
252 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
253 
254 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
255 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
256 
257 	if (!vht)
258 		return;
259 
260 	vht_cap = &sband->vht_cap;
261 	if (nstream > 1)
262 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
263 	else
264 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
265 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
266 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
267 
268 	for (i = 0; i < 8; i++) {
269 		if (i < nstream)
270 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
271 		else
272 			mcs_map |=
273 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
274 	}
275 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
276 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
277 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
278 		vht_cap->vht_mcs.tx_highest |=
279 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
280 }
281 
282 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
283 {
284 	if (phy->cap.has_2ghz)
285 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
286 	if (phy->cap.has_5ghz)
287 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
288 	if (phy->cap.has_6ghz)
289 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
290 }
291 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
292 
293 static int
294 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
295 		const struct ieee80211_channel *chan, int n_chan,
296 		struct ieee80211_rate *rates, int n_rates,
297 		bool ht, bool vht)
298 {
299 	struct ieee80211_supported_band *sband = &msband->sband;
300 	struct ieee80211_sta_vht_cap *vht_cap;
301 	struct ieee80211_sta_ht_cap *ht_cap;
302 	struct mt76_dev *dev = phy->dev;
303 	void *chanlist;
304 	int size;
305 
306 	size = n_chan * sizeof(*chan);
307 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
308 	if (!chanlist)
309 		return -ENOMEM;
310 
311 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
312 				    GFP_KERNEL);
313 	if (!msband->chan)
314 		return -ENOMEM;
315 
316 	sband->channels = chanlist;
317 	sband->n_channels = n_chan;
318 	sband->bitrates = rates;
319 	sband->n_bitrates = n_rates;
320 
321 	if (!ht)
322 		return 0;
323 
324 	ht_cap = &sband->ht_cap;
325 	ht_cap->ht_supported = true;
326 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
327 		       IEEE80211_HT_CAP_GRN_FLD |
328 		       IEEE80211_HT_CAP_SGI_20 |
329 		       IEEE80211_HT_CAP_SGI_40 |
330 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
331 
332 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
333 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
334 
335 	mt76_init_stream_cap(phy, sband, vht);
336 
337 	if (!vht)
338 		return 0;
339 
340 	vht_cap = &sband->vht_cap;
341 	vht_cap->vht_supported = true;
342 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
343 			IEEE80211_VHT_CAP_RXSTBC_1 |
344 			IEEE80211_VHT_CAP_SHORT_GI_80 |
345 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
346 
347 	return 0;
348 }
349 
350 static int
351 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
352 		   int n_rates)
353 {
354 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
355 
356 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
357 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
358 			       n_rates, true, false);
359 }
360 
361 static int
362 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates, bool vht)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
368 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
369 			       n_rates, true, vht);
370 }
371 
372 static int
373 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
379 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
380 			       n_rates, false, false);
381 }
382 
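/* Drop a band whose channels are all disabled (e.g. by OF frequency
 * limits); otherwise point the default channel and channel state at the
 * band's first channel.
 */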
383 static void
384 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
385 		 enum nl80211_band band)
386 {
387 	struct ieee80211_supported_band *sband = &msband->sband;
388 	bool found = false;
389 	int i;
390 
391 	if (!sband)
392 		return;
393 
394 	for (i = 0; i < sband->n_channels; i++) {
395 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
396 			continue;
397 
398 		found = true;
399 		break;
400 	}
401 
402 	if (found) {
403 		phy->chandef.chan = &sband->channels[0];
404 		phy->chan_state = &msband->chan[0];
405 		return;
406 	}
407 
408 	sband->n_channels = 0;
409 	phy->hw->wiphy->bands[band] = NULL;
410 }
411 
412 static int
413 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
414 {
415 	struct mt76_dev *dev = phy->dev;
416 	struct wiphy *wiphy = hw->wiphy;
417 
418 	SET_IEEE80211_DEV(hw, dev->dev);
419 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
420 
421 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
422 	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
423 			WIPHY_FLAG_SUPPORTS_TDLS |
424 			WIPHY_FLAG_AP_UAPSD;
425 
426 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
427 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
428 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
429 
430 	wiphy->available_antennas_tx = phy->antenna_mask;
431 	wiphy->available_antennas_rx = phy->antenna_mask;
432 
433 	wiphy->sar_capa = &mt76_sar_capa;
434 	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
435 				sizeof(struct mt76_freq_range_power),
436 				GFP_KERNEL);
437 	if (!phy->frp)
438 		return -ENOMEM;
439 
440 	hw->txq_data_size = sizeof(struct mt76_txq);
441 	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
442 
443 	if (!hw->max_tx_fragments)
444 		hw->max_tx_fragments = 16;
445 
446 	ieee80211_hw_set(hw, SIGNAL_DBM);
447 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
448 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
449 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
450 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
451 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
452 	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
453 
454 	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
455 		ieee80211_hw_set(hw, TX_AMSDU);
456 		ieee80211_hw_set(hw, TX_FRAG_LIST);
457 	}
458 
459 	ieee80211_hw_set(hw, MFP_CAPABLE);
460 	ieee80211_hw_set(hw, AP_LINK_PS);
461 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
462 
463 	return 0;
464 }
465 
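/* Allocate an extra (secondary band) phy: the mt76_phy lives in hw->priv,
 * with 'size' bytes of driver private data appended after it (phy->priv).
 */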
466 struct mt76_phy *
467 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
468 	       const struct ieee80211_ops *ops, u8 band_idx)
469 {
470 	struct ieee80211_hw *hw;
471 	unsigned int phy_size;
472 	struct mt76_phy *phy;
473 
474 	phy_size = ALIGN(sizeof(*phy), 8);
475 	hw = ieee80211_alloc_hw(size + phy_size, ops);
476 	if (!hw)
477 		return NULL;
478 
479 	phy = hw->priv;
480 	phy->dev = dev;
481 	phy->hw = hw;
482 	phy->priv = hw->priv + phy_size;
483 	phy->band_idx = band_idx;
484 
485 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
486 	hw->wiphy->interface_modes =
487 		BIT(NL80211_IFTYPE_STATION) |
488 		BIT(NL80211_IFTYPE_AP) |
489 #ifdef CONFIG_MAC80211_MESH
490 		BIT(NL80211_IFTYPE_MESH_POINT) |
491 #endif
492 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
493 		BIT(NL80211_IFTYPE_P2P_GO) |
494 		BIT(NL80211_IFTYPE_ADHOC);
495 
496 	return phy;
497 }
498 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
499 
500 int mt76_register_phy(struct mt76_phy *phy, bool vht,
501 		      struct ieee80211_rate *rates, int n_rates)
502 {
503 	int ret;
504 
505 	ret = mt76_phy_init(phy, phy->hw);
506 	if (ret)
507 		return ret;
508 
509 	if (phy->cap.has_2ghz) {
510 		ret = mt76_init_sband_2g(phy, rates, n_rates);
511 		if (ret)
512 			return ret;
513 	}
514 
515 	if (phy->cap.has_5ghz) {
516 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
517 		if (ret)
518 			return ret;
519 	}
520 
521 	if (phy->cap.has_6ghz) {
522 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
523 		if (ret)
524 			return ret;
525 	}
526 
527 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
528 		ret = mt76_led_init(phy);
529 		if (ret)
530 			return ret;
531 	}
532 
533 	wiphy_read_of_freq_limits(phy->hw->wiphy);
534 	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
535 	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
536 	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
537 
538 	ret = ieee80211_register_hw(phy->hw);
539 	if (ret)
540 		return ret;
541 
542 	set_bit(MT76_STATE_REGISTERED, &phy->state);
543 	phy->dev->phys[phy->band_idx] = phy;
544 
545 	return 0;
546 }
547 EXPORT_SYMBOL_GPL(mt76_register_phy);
548 
549 void mt76_unregister_phy(struct mt76_phy *phy)
550 {
551 	struct mt76_dev *dev = phy->dev;
552 
553 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
554 		return;
555 
556 	if (IS_ENABLED(CONFIG_MT76_LEDS))
557 		mt76_led_cleanup(phy);
558 	mt76_tx_status_check(dev, true);
559 	ieee80211_unregister_hw(phy->hw);
560 	dev->phys[phy->band_idx] = NULL;
561 }
562 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
563 
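/* Allocate a page_pool for an RX queue: data queues (MAIN/BAND1/BAND2) get
 * a larger pool than MCU queues, and on MMIO devices the pool also takes
 * care of DMA mapping and sync.
 */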
564 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
565 {
566 	struct page_pool_params pp_params = {
567 		.order = 0,
568 		.flags = PP_FLAG_PAGE_FRAG,
569 		.nid = NUMA_NO_NODE,
570 		.dev = dev->dma_dev,
571 	};
572 	int idx = q - dev->q_rx;
573 
574 	switch (idx) {
575 	case MT_RXQ_MAIN:
576 	case MT_RXQ_BAND1:
577 	case MT_RXQ_BAND2:
578 		pp_params.pool_size = 256;
579 		break;
580 	default:
581 		pp_params.pool_size = 16;
582 		break;
583 	}
584 
585 	if (mt76_is_mmio(dev)) {
586 		/* rely on page_pool for DMA mapping */
587 		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
588 		pp_params.dma_dir = DMA_FROM_DEVICE;
589 		pp_params.max_len = PAGE_SIZE;
590 		pp_params.offset = 0;
591 	}
592 
593 	q->page_pool = page_pool_create(&pp_params);
594 	if (IS_ERR(q->page_pool)) {
595 		int err = PTR_ERR(q->page_pool);
596 
597 		q->page_pool = NULL;
598 		return err;
599 	}
600 
601 	return 0;
602 }
603 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
604 
605 struct mt76_dev *
606 mt76_alloc_device(struct device *pdev, unsigned int size,
607 		  const struct ieee80211_ops *ops,
608 		  const struct mt76_driver_ops *drv_ops)
609 {
610 	struct ieee80211_hw *hw;
611 	struct mt76_phy *phy;
612 	struct mt76_dev *dev;
613 	int i;
614 
615 	hw = ieee80211_alloc_hw(size, ops);
616 	if (!hw)
617 		return NULL;
618 
619 	dev = hw->priv;
620 	dev->hw = hw;
621 	dev->dev = pdev;
622 	dev->drv = drv_ops;
623 	dev->dma_dev = pdev;
624 
625 	phy = &dev->phy;
626 	phy->dev = dev;
627 	phy->hw = hw;
628 	phy->band_idx = MT_BAND0;
629 	dev->phys[phy->band_idx] = phy;
630 
631 	spin_lock_init(&dev->rx_lock);
632 	spin_lock_init(&dev->lock);
633 	spin_lock_init(&dev->cc_lock);
634 	spin_lock_init(&dev->status_lock);
635 	spin_lock_init(&dev->wed_lock);
636 	mutex_init(&dev->mutex);
637 	init_waitqueue_head(&dev->tx_wait);
638 
639 	skb_queue_head_init(&dev->mcu.res_q);
640 	init_waitqueue_head(&dev->mcu.wait);
641 	mutex_init(&dev->mcu.mutex);
642 	dev->tx_worker.fn = mt76_tx_worker;
643 
644 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
645 	hw->wiphy->interface_modes =
646 		BIT(NL80211_IFTYPE_STATION) |
647 		BIT(NL80211_IFTYPE_AP) |
648 #ifdef CONFIG_MAC80211_MESH
649 		BIT(NL80211_IFTYPE_MESH_POINT) |
650 #endif
651 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
652 		BIT(NL80211_IFTYPE_P2P_GO) |
653 		BIT(NL80211_IFTYPE_ADHOC);
654 
655 	spin_lock_init(&dev->token_lock);
656 	idr_init(&dev->token);
657 
658 	spin_lock_init(&dev->rx_token_lock);
659 	idr_init(&dev->rx_token);
660 
661 	INIT_LIST_HEAD(&dev->wcid_list);
662 
663 	INIT_LIST_HEAD(&dev->txwi_cache);
664 	INIT_LIST_HEAD(&dev->rxwi_cache);
665 	dev->token_size = dev->drv->token_size;
666 
667 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
668 		skb_queue_head_init(&dev->rx_skb[i]);
669 
670 	dev->wq = alloc_ordered_workqueue("mt76", 0);
671 	if (!dev->wq) {
672 		ieee80211_free_hw(hw);
673 		return NULL;
674 	}
675 
676 	return dev;
677 }
678 EXPORT_SYMBOL_GPL(mt76_alloc_device);
679 
680 int mt76_register_device(struct mt76_dev *dev, bool vht,
681 			 struct ieee80211_rate *rates, int n_rates)
682 {
683 	struct ieee80211_hw *hw = dev->hw;
684 	struct mt76_phy *phy = &dev->phy;
685 	int ret;
686 
687 	dev_set_drvdata(dev->dev, dev);
688 	ret = mt76_phy_init(phy, hw);
689 	if (ret)
690 		return ret;
691 
692 	if (phy->cap.has_2ghz) {
693 		ret = mt76_init_sband_2g(phy, rates, n_rates);
694 		if (ret)
695 			return ret;
696 	}
697 
698 	if (phy->cap.has_5ghz) {
699 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
700 		if (ret)
701 			return ret;
702 	}
703 
704 	if (phy->cap.has_6ghz) {
705 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
706 		if (ret)
707 			return ret;
708 	}
709 
710 	wiphy_read_of_freq_limits(hw->wiphy);
711 	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
712 	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
713 	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
714 
715 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
716 		ret = mt76_led_init(phy);
717 		if (ret)
718 			return ret;
719 	}
720 
721 	ret = ieee80211_register_hw(hw);
722 	if (ret)
723 		return ret;
724 
725 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
726 	set_bit(MT76_STATE_REGISTERED, &phy->state);
727 	sched_set_fifo_low(dev->tx_worker.task);
728 
729 	return 0;
730 }
731 EXPORT_SYMBOL_GPL(mt76_register_device);
732 
733 void mt76_unregister_device(struct mt76_dev *dev)
734 {
735 	struct ieee80211_hw *hw = dev->hw;
736 
737 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
738 		return;
739 
740 	if (IS_ENABLED(CONFIG_MT76_LEDS))
741 		mt76_led_cleanup(&dev->phy);
742 	mt76_tx_status_check(dev, true);
743 	ieee80211_unregister_hw(hw);
744 }
745 EXPORT_SYMBOL_GPL(mt76_unregister_device);
746 
747 void mt76_free_device(struct mt76_dev *dev)
748 {
749 	mt76_worker_teardown(&dev->tx_worker);
750 	if (dev->wq) {
751 		destroy_workqueue(dev->wq);
752 		dev->wq = NULL;
753 	}
754 	ieee80211_free_hw(dev->hw);
755 }
756 EXPORT_SYMBOL_GPL(mt76_free_device);
757 
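/* RX A-MSDU handling: subframes of one A-MSDU arrive as separate buffers
 * sharing a sequence number. mt76_rx_release_burst() chains them into the
 * frag_list of the first subframe, and mt76_rx_release_amsdu() releases the
 * aggregate to the RX queue once the burst is complete.
 */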
758 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
759 {
760 	struct sk_buff *skb = phy->rx_amsdu[q].head;
761 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
762 	struct mt76_dev *dev = phy->dev;
763 
764 	phy->rx_amsdu[q].head = NULL;
765 	phy->rx_amsdu[q].tail = NULL;
766 
	/*
	 * Validate that the A-MSDU has a proper first subframe.
	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
	 * A-MSDU flag in the QoS header gets flipped. In that case, the first
	 * subframe carries an LLC/SNAP header in place of the destination
	 * address.
	 */
774 	if (skb_shinfo(skb)->frag_list) {
775 		int offset = 0;
776 
777 		if (!(status->flag & RX_FLAG_8023)) {
778 			offset = ieee80211_get_hdrlen_from_skb(skb);
779 
780 			if ((status->flag &
781 			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
782 			    RX_FLAG_DECRYPTED)
783 				offset += 8;
784 		}
785 
786 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
787 			dev_kfree_skb(skb);
788 			return;
789 		}
790 	}
791 	__skb_queue_tail(&dev->rx_skb[q], skb);
792 }
793 
794 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
795 				  struct sk_buff *skb)
796 {
797 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
798 
799 	if (phy->rx_amsdu[q].head &&
800 	    (!status->amsdu || status->first_amsdu ||
801 	     status->seqno != phy->rx_amsdu[q].seqno))
802 		mt76_rx_release_amsdu(phy, q);
803 
804 	if (!phy->rx_amsdu[q].head) {
805 		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
806 		phy->rx_amsdu[q].seqno = status->seqno;
807 		phy->rx_amsdu[q].head = skb;
808 	} else {
809 		*phy->rx_amsdu[q].tail = skb;
810 		phy->rx_amsdu[q].tail = &skb->next;
811 	}
812 
813 	if (!status->amsdu || status->last_amsdu)
814 		mt76_rx_release_amsdu(phy, q);
815 }
816 
817 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
818 {
819 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
820 	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
821 
822 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
823 		dev_kfree_skb(skb);
824 		return;
825 	}
826 
827 #ifdef CONFIG_NL80211_TESTMODE
828 	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
829 		phy->test.rx_stats.packets[q]++;
830 		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
831 			phy->test.rx_stats.fcs_error[q]++;
832 	}
833 #endif
834 
835 	mt76_rx_release_burst(phy, q, skb);
836 }
837 EXPORT_SYMBOL_GPL(mt76_rx);
838 
839 bool mt76_has_tx_pending(struct mt76_phy *phy)
840 {
841 	struct mt76_queue *q;
842 	int i;
843 
844 	for (i = 0; i < __MT_TXQ_MAX; i++) {
845 		q = phy->q_tx[i];
846 		if (q && q->queued)
847 			return true;
848 	}
849 
850 	return false;
851 }
852 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
853 
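/* Look up the mt76_channel_state entry that parallels the cfg80211 channel
 * list of the band the channel belongs to.
 */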
854 static struct mt76_channel_state *
855 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
856 {
857 	struct mt76_sband *msband;
858 	int idx;
859 
860 	if (c->band == NL80211_BAND_2GHZ)
861 		msband = &phy->sband_2g;
862 	else if (c->band == NL80211_BAND_6GHZ)
863 		msband = &phy->sband_6g;
864 	else
865 		msband = &phy->sband_5g;
866 
867 	idx = c - &msband->sband.channels[0];
868 	return &msband->chan[idx];
869 }
870 
871 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
872 {
873 	struct mt76_channel_state *state = phy->chan_state;
874 
875 	state->cc_active += ktime_to_us(ktime_sub(time,
876 						  phy->survey_time));
877 	phy->survey_time = time;
878 }
879 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
880 
881 void mt76_update_survey(struct mt76_phy *phy)
882 {
883 	struct mt76_dev *dev = phy->dev;
884 	ktime_t cur_time;
885 
886 	if (dev->drv->update_survey)
887 		dev->drv->update_survey(phy);
888 
889 	cur_time = ktime_get_boottime();
890 	mt76_update_survey_active_time(phy, cur_time);
891 
892 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
893 		struct mt76_channel_state *state = phy->chan_state;
894 
895 		spin_lock_bh(&dev->cc_lock);
896 		state->cc_bss_rx += dev->cur_cc_bss_rx;
897 		dev->cur_cc_bss_rx = 0;
898 		spin_unlock_bh(&dev->cc_lock);
899 	}
900 }
901 EXPORT_SYMBOL_GPL(mt76_update_survey);
902 
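/* Switch the phy to the channel currently configured in hw->conf: wait
 * (up to 200 ms) for pending TX to drain, update the survey counters, reset
 * the DFS state on a frequency/width change and clear the channel
 * statistics when tuning away from the operating channel.
 */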
903 void mt76_set_channel(struct mt76_phy *phy)
904 {
905 	struct mt76_dev *dev = phy->dev;
906 	struct ieee80211_hw *hw = phy->hw;
907 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
908 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
909 	int timeout = HZ / 5;
910 
911 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
912 	mt76_update_survey(phy);
913 
914 	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
915 	    phy->chandef.width != chandef->width)
916 		phy->dfs_state = MT_DFS_STATE_UNKNOWN;
917 
918 	phy->chandef = *chandef;
919 	phy->chan_state = mt76_channel_state(phy, chandef->chan);
920 
921 	if (!offchannel)
922 		phy->main_chan = chandef->chan;
923 
924 	if (chandef->chan != phy->main_chan)
925 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
926 }
927 EXPORT_SYMBOL_GPL(mt76_set_channel);
928 
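/* cfg80211 survey callback: the index walks the 2 GHz, 5 GHz and 6 GHz
 * channel lists in that order. Channel time counters are accumulated in
 * microseconds and reported in milliseconds.
 */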
929 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
930 		    struct survey_info *survey)
931 {
932 	struct mt76_phy *phy = hw->priv;
933 	struct mt76_dev *dev = phy->dev;
934 	struct mt76_sband *sband;
935 	struct ieee80211_channel *chan;
936 	struct mt76_channel_state *state;
937 	int ret = 0;
938 
939 	mutex_lock(&dev->mutex);
940 	if (idx == 0 && dev->drv->update_survey)
941 		mt76_update_survey(phy);
942 
943 	if (idx >= phy->sband_2g.sband.n_channels +
944 		   phy->sband_5g.sband.n_channels) {
945 		idx -= (phy->sband_2g.sband.n_channels +
946 			phy->sband_5g.sband.n_channels);
947 		sband = &phy->sband_6g;
948 	} else if (idx >= phy->sband_2g.sband.n_channels) {
949 		idx -= phy->sband_2g.sband.n_channels;
950 		sband = &phy->sband_5g;
951 	} else {
952 		sband = &phy->sband_2g;
953 	}
954 
955 	if (idx >= sband->sband.n_channels) {
956 		ret = -ENOENT;
957 		goto out;
958 	}
959 
960 	chan = &sband->sband.channels[idx];
961 	state = mt76_channel_state(phy, chan);
962 
963 	memset(survey, 0, sizeof(*survey));
964 	survey->channel = chan;
965 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
966 	survey->filled |= dev->drv->survey_flags;
967 	if (state->noise)
968 		survey->filled |= SURVEY_INFO_NOISE_DBM;
969 
970 	if (chan == phy->main_chan) {
971 		survey->filled |= SURVEY_INFO_IN_USE;
972 
973 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
974 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
975 	}
976 
977 	survey->time_busy = div_u64(state->cc_busy, 1000);
978 	survey->time_rx = div_u64(state->cc_rx, 1000);
979 	survey->time = div_u64(state->cc_active, 1000);
980 	survey->noise = state->noise;
981 
982 	spin_lock_bh(&dev->cc_lock);
983 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
984 	survey->time_tx = div_u64(state->cc_tx, 1000);
985 	spin_unlock_bh(&dev->cc_lock);
986 
987 out:
988 	mutex_unlock(&dev->mutex);
989 
990 	return ret;
991 }
992 EXPORT_SYMBOL_GPL(mt76_get_survey);
993 
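/* Enable software PN replay checking for CCMP keys: seed the per-TID RX PN
 * counters (plus a separate one for robust management frames) from
 * mac80211's current key RX sequence.
 */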
994 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
995 			 struct ieee80211_key_conf *key)
996 {
997 	struct ieee80211_key_seq seq;
998 	int i;
999 
1000 	wcid->rx_check_pn = false;
1001 
1002 	if (!key)
1003 		return;
1004 
1005 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1006 		return;
1007 
1008 	wcid->rx_check_pn = true;
1009 
1010 	/* data frame */
1011 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1012 		ieee80211_get_key_rx_seq(key, i, &seq);
1013 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1014 	}
1015 
1016 	/* robust management frame */
1017 	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
1021 EXPORT_SYMBOL(mt76_wcid_key_setup);
1022 
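/* Combine per-chain RSSI values into a single signal estimate: keep a
 * running maximum and add 3/2/1 dB for additional chains that are within
 * 0/2/6 dB of it. Chains missing from chain_mask or reporting a positive
 * (invalid) value are skipped; -128 means no valid measurement.
 */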
1023 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1024 {
1025 	int signal = -128;
1026 	u8 chains;
1027 
1028 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1029 		int cur, diff;
1030 
1031 		cur = *chain_signal;
		if (!(chains & BIT(0)) || cur > 0)
1034 			continue;
1035 
1036 		if (cur > signal)
1037 			swap(cur, signal);
1038 
1039 		diff = signal - cur;
1040 		if (diff == 0)
1041 			signal += 3;
1042 		else if (diff <= 2)
1043 			signal += 2;
1044 		else if (diff <= 6)
1045 			signal += 1;
1046 	}
1047 
1048 	return signal;
1049 }
1050 EXPORT_SYMBOL(mt76_rx_signal);
1051 
1052 static void
1053 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1054 		struct ieee80211_hw **hw,
1055 		struct ieee80211_sta **sta)
1056 {
1057 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1058 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1059 	struct mt76_rx_status mstat;
1060 
1061 	mstat = *((struct mt76_rx_status *)skb->cb);
1062 	memset(status, 0, sizeof(*status));
1063 
1064 	status->flag = mstat.flag;
1065 	status->freq = mstat.freq;
1066 	status->enc_flags = mstat.enc_flags;
1067 	status->encoding = mstat.encoding;
1068 	status->bw = mstat.bw;
1069 	status->he_ru = mstat.he_ru;
1070 	status->he_gi = mstat.he_gi;
1071 	status->he_dcm = mstat.he_dcm;
1072 	status->rate_idx = mstat.rate_idx;
1073 	status->nss = mstat.nss;
1074 	status->band = mstat.band;
1075 	status->signal = mstat.signal;
1076 	status->chains = mstat.chains;
1077 	status->ampdu_reference = mstat.ampdu_ref;
1078 	status->device_timestamp = mstat.timestamp;
1079 	status->mactime = mstat.timestamp;
1080 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1081 	if (status->signal <= -128)
1082 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1083 
1084 	if (ieee80211_is_beacon(hdr->frame_control) ||
1085 	    ieee80211_is_probe_resp(hdr->frame_control))
1086 		status->boottime_ns = ktime_get_boottime_ns();
1087 
1088 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1089 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1090 		     sizeof(mstat.chain_signal));
1091 	memcpy(status->chain_signal, mstat.chain_signal,
1092 	       sizeof(mstat.chain_signal));
1093 
1094 	*sta = wcid_to_sta(mstat.wcid);
1095 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1096 }
1097 
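/* Software CCMP replay protection for hardware-decrypted frames: compare
 * the received PN against the last accepted PN of the TID (or the separate
 * robust management frame counter) and demote replayed frames to
 * monitor-only.
 */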
1098 static void
1099 mt76_check_ccmp_pn(struct sk_buff *skb)
1100 {
1101 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1102 	struct mt76_wcid *wcid = status->wcid;
1103 	struct ieee80211_hdr *hdr;
1104 	int security_idx;
1105 	int ret;
1106 
1107 	if (!(status->flag & RX_FLAG_DECRYPTED))
1108 		return;
1109 
1110 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1111 		return;
1112 
1113 	if (!wcid || !wcid->rx_check_pn)
1114 		return;
1115 
1116 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1117 	if (status->flag & RX_FLAG_8023)
1118 		goto skip_hdr_check;
1119 
1120 	hdr = mt76_skb_get_hdr(skb);
1121 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
1126 		if (ieee80211_is_frag(hdr) &&
1127 		    !ieee80211_is_first_frag(hdr->frame_control))
1128 			return;
1129 	}
1130 
1131 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1132 	 *
1133 	 * the recipient shall maintain a single replay counter for received
1134 	 * individually addressed robust Management frames that are received
1135 	 * with the To DS subfield equal to 0, [...]
1136 	 */
1137 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1138 	    !ieee80211_has_tods(hdr->frame_control))
1139 		security_idx = IEEE80211_NUM_TIDS;
1140 
1141 skip_hdr_check:
1142 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1143 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1144 		     sizeof(status->iv));
1145 	if (ret <= 0) {
1146 		status->flag |= RX_FLAG_ONLY_MONITOR;
1147 		return;
1148 	}
1149 
1150 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1151 
1152 	if (status->flag & RX_FLAG_IV_STRIPPED)
1153 		status->flag |= RX_FLAG_PN_VALIDATED;
1154 }
1155 
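/* Software RX airtime accounting (MT_DRV_SW_RX_AIRTIME): estimate the
 * airtime of received frames from mac80211's rate information, add it to
 * the BSS RX airtime counter and report it to the station for airtime
 * fairness. Subframes of an A-MPDU are accumulated and reported in one go.
 */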
1156 static void
1157 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1158 		    int len)
1159 {
1160 	struct mt76_wcid *wcid = status->wcid;
1161 	struct ieee80211_rx_status info = {
1162 		.enc_flags = status->enc_flags,
1163 		.rate_idx = status->rate_idx,
1164 		.encoding = status->encoding,
1165 		.band = status->band,
1166 		.nss = status->nss,
1167 		.bw = status->bw,
1168 	};
1169 	struct ieee80211_sta *sta;
1170 	u32 airtime;
1171 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1172 
1173 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1174 	spin_lock(&dev->cc_lock);
1175 	dev->cur_cc_bss_rx += airtime;
1176 	spin_unlock(&dev->cc_lock);
1177 
1178 	if (!wcid || !wcid->sta)
1179 		return;
1180 
1181 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1182 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1183 }
1184 
1185 static void
1186 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1187 {
1188 	struct mt76_wcid *wcid;
1189 	int wcid_idx;
1190 
1191 	if (!dev->rx_ampdu_len)
1192 		return;
1193 
1194 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1195 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1196 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1197 	else
1198 		wcid = NULL;
1199 	dev->rx_ampdu_status.wcid = wcid;
1200 
1201 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1202 
1203 	dev->rx_ampdu_len = 0;
1204 	dev->rx_ampdu_ref = 0;
1205 }
1206 
1207 static void
1208 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1209 {
1210 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1211 	struct mt76_wcid *wcid = status->wcid;
1212 
1213 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1214 		return;
1215 
1216 	if (!wcid || !wcid->sta) {
1217 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1218 
1219 		if (status->flag & RX_FLAG_8023)
1220 			return;
1221 
1222 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1223 			return;
1224 
1225 		wcid = NULL;
1226 	}
1227 
1228 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1229 	    status->ampdu_ref != dev->rx_ampdu_ref)
1230 		mt76_airtime_flush_ampdu(dev);
1231 
1232 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1233 		if (!dev->rx_ampdu_len ||
1234 		    status->ampdu_ref != dev->rx_ampdu_ref) {
1235 			dev->rx_ampdu_status = *status;
1236 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1237 			dev->rx_ampdu_ref = status->ampdu_ref;
1238 		}
1239 
1240 		dev->rx_ampdu_len += skb->len;
1241 		return;
1242 	}
1243 
1244 	mt76_airtime_report(dev, status, skb->len);
1245 }
1246 
1247 static void
1248 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1249 {
1250 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1251 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1252 	struct ieee80211_sta *sta;
1253 	struct ieee80211_hw *hw;
1254 	struct mt76_wcid *wcid = status->wcid;
1255 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1256 	bool ps;
1257 
1258 	hw = mt76_phy_hw(dev, status->phy_idx);
1259 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1260 	    !(status->flag & RX_FLAG_8023)) {
1261 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1262 		if (sta)
1263 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1264 	}
1265 
1266 	mt76_airtime_check(dev, skb);
1267 
1268 	if (!wcid || !wcid->sta)
1269 		return;
1270 
1271 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1272 
1273 	if (status->signal <= 0)
1274 		ewma_signal_add(&wcid->rssi, -status->signal);
1275 
1276 	wcid->inactive_count = 0;
1277 
1278 	if (status->flag & RX_FLAG_8023)
1279 		return;
1280 
1281 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1282 		return;
1283 
1284 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1285 		ieee80211_sta_pspoll(sta);
1286 		return;
1287 	}
1288 
1289 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1290 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1291 	      ieee80211_is_data(hdr->frame_control)))
1292 		return;
1293 
1294 	ps = ieee80211_has_pm(hdr->frame_control);
1295 
1296 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1297 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1298 		ieee80211_sta_uapsd_trigger(sta, tidno);
1299 
1300 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1301 		return;
1302 
1303 	if (ps)
1304 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1305 
1306 	dev->drv->sta_ps(dev, sta, ps);
1307 
1308 	if (!ps)
1309 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1310 
1311 	ieee80211_sta_ps_transition(sta, ps);
1312 }
1313 
1314 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1315 		      struct napi_struct *napi)
1316 {
1317 	struct ieee80211_sta *sta;
1318 	struct ieee80211_hw *hw;
1319 	struct sk_buff *skb, *tmp;
1320 	LIST_HEAD(list);
1321 
1322 	spin_lock(&dev->rx_lock);
1323 	while ((skb = __skb_dequeue(frames)) != NULL) {
1324 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1325 
1326 		mt76_check_ccmp_pn(skb);
1327 		skb_shinfo(skb)->frag_list = NULL;
1328 		mt76_rx_convert(dev, skb, &hw, &sta);
1329 		ieee80211_rx_list(hw, sta, skb, &list);
1330 
1331 		/* subsequent amsdu frames */
1332 		while (nskb) {
1333 			skb = nskb;
1334 			nskb = nskb->next;
1335 			skb->next = NULL;
1336 
1337 			mt76_rx_convert(dev, skb, &hw, &sta);
1338 			ieee80211_rx_list(hw, sta, skb, &list);
1339 		}
1340 	}
1341 	spin_unlock(&dev->rx_lock);
1342 
1343 	if (!napi) {
1344 		netif_receive_skb_list(&list);
1345 		return;
1346 	}
1347 
1348 	list_for_each_entry_safe(skb, tmp, &list, list) {
1349 		skb_list_del_init(skb);
1350 		napi_gro_receive(napi, skb);
1351 	}
1352 }
1353 
1354 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1355 			   struct napi_struct *napi)
1356 {
1357 	struct sk_buff_head frames;
1358 	struct sk_buff *skb;
1359 
1360 	__skb_queue_head_init(&frames);
1361 
1362 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1363 		mt76_check_sta(dev, skb);
1364 		if (mtk_wed_device_active(&dev->mmio.wed))
1365 			__skb_queue_tail(&frames, skb);
1366 		else
1367 			mt76_rx_aggr_reorder(skb, &frames);
1368 	}
1369 
1370 	mt76_rx_complete(dev, &frames, napi);
1371 }
1372 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1373 
1374 static int
1375 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1376 	     struct ieee80211_sta *sta)
1377 {
1378 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1379 	struct mt76_dev *dev = phy->dev;
1380 	int ret;
1381 	int i;
1382 
1383 	mutex_lock(&dev->mutex);
1384 
1385 	ret = dev->drv->sta_add(dev, vif, sta);
1386 	if (ret)
1387 		goto out;
1388 
1389 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1390 		struct mt76_txq *mtxq;
1391 
1392 		if (!sta->txq[i])
1393 			continue;
1394 
1395 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1396 		mtxq->wcid = wcid->idx;
1397 	}
1398 
1399 	ewma_signal_init(&wcid->rssi);
1400 	if (phy->band_idx == MT_BAND1)
1401 		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1402 	wcid->phy_idx = phy->band_idx;
1403 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1404 
1405 	mt76_packet_id_init(wcid);
1406 out:
1407 	mutex_unlock(&dev->mutex);
1408 
1409 	return ret;
1410 }
1411 
1412 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1413 		       struct ieee80211_sta *sta)
1414 {
1415 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1416 	int i, idx = wcid->idx;
1417 
1418 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1419 		mt76_rx_aggr_stop(dev, wcid, i);
1420 
1421 	if (dev->drv->sta_remove)
1422 		dev->drv->sta_remove(dev, vif, sta);
1423 
1424 	mt76_packet_id_flush(dev, wcid);
1425 
1426 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1427 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1428 }
1429 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1430 
1431 static void
1432 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1433 		struct ieee80211_sta *sta)
1434 {
1435 	mutex_lock(&dev->mutex);
1436 	__mt76_sta_remove(dev, vif, sta);
1437 	mutex_unlock(&dev->mutex);
1438 }
1439 
1440 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1441 		   struct ieee80211_sta *sta,
1442 		   enum ieee80211_sta_state old_state,
1443 		   enum ieee80211_sta_state new_state)
1444 {
1445 	struct mt76_phy *phy = hw->priv;
1446 	struct mt76_dev *dev = phy->dev;
1447 
1448 	if (old_state == IEEE80211_STA_NOTEXIST &&
1449 	    new_state == IEEE80211_STA_NONE)
1450 		return mt76_sta_add(phy, vif, sta);
1451 
1452 	if (old_state == IEEE80211_STA_AUTH &&
1453 	    new_state == IEEE80211_STA_ASSOC &&
1454 	    dev->drv->sta_assoc)
1455 		dev->drv->sta_assoc(dev, vif, sta);
1456 
1457 	if (old_state == IEEE80211_STA_NONE &&
1458 	    new_state == IEEE80211_STA_NOTEXIST)
1459 		mt76_sta_remove(dev, vif, sta);
1460 
1461 	return 0;
1462 }
1463 EXPORT_SYMBOL_GPL(mt76_sta_state);
1464 
1465 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1466 			     struct ieee80211_sta *sta)
1467 {
1468 	struct mt76_phy *phy = hw->priv;
1469 	struct mt76_dev *dev = phy->dev;
1470 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1471 
1472 	mutex_lock(&dev->mutex);
1473 	spin_lock_bh(&dev->status_lock);
1474 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1475 	spin_unlock_bh(&dev->status_lock);
1476 	mutex_unlock(&dev->mutex);
1477 }
1478 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1479 
1480 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1481 		     int *dbm)
1482 {
1483 	struct mt76_phy *phy = hw->priv;
1484 	int n_chains = hweight8(phy->antenna_mask);
1485 	int delta = mt76_tx_power_nss_delta(n_chains);
1486 
1487 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1488 
1489 	return 0;
1490 }
1491 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1492 
1493 int mt76_init_sar_power(struct ieee80211_hw *hw,
1494 			const struct cfg80211_sar_specs *sar)
1495 {
1496 	struct mt76_phy *phy = hw->priv;
1497 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1498 	int i;
1499 
1500 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1501 		return -EINVAL;
1502 
1503 	for (i = 0; i < sar->num_sub_specs; i++) {
1504 		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limit in units of 0.25 dBm */
1506 		s32 power = sar->sub_specs[i].power >> 1;
1507 
1508 		if (power > 127 || power < -127)
1509 			power = 127;
1510 
1511 		phy->frp[index].range = &capa->freq_ranges[index];
1512 		phy->frp[index].power = power;
1513 	}
1514 
1515 	return 0;
1516 }
1517 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1518 
1519 int mt76_get_sar_power(struct mt76_phy *phy,
1520 		       struct ieee80211_channel *chan,
1521 		       int power)
1522 {
1523 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1524 	int freq, i;
1525 
1526 	if (!capa || !phy->frp)
1527 		return power;
1528 
1529 	if (power > 127 || power < -127)
1530 		power = 127;
1531 
1532 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0; i < capa->num_freq_ranges; i++) {
1534 		if (phy->frp[i].range &&
1535 		    freq >= phy->frp[i].range->start_freq &&
1536 		    freq < phy->frp[i].range->end_freq) {
1537 			power = min_t(int, phy->frp[i].power, power);
1538 			break;
1539 		}
1540 	}
1541 
1542 	return power;
1543 }
1544 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1545 
1546 static void
1547 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1548 {
1549 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1550 		ieee80211_csa_finish(vif);
1551 }
1552 
1553 void mt76_csa_finish(struct mt76_dev *dev)
1554 {
1555 	if (!dev->csa_complete)
1556 		return;
1557 
1558 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1559 		IEEE80211_IFACE_ITER_RESUME_ALL,
1560 		__mt76_csa_finish, dev);
1561 
1562 	dev->csa_complete = 0;
1563 }
1564 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1565 
1566 static void
1567 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1568 {
1569 	struct mt76_dev *dev = priv;
1570 
1571 	if (!vif->bss_conf.csa_active)
1572 		return;
1573 
1574 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1575 }
1576 
1577 void mt76_csa_check(struct mt76_dev *dev)
1578 {
1579 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1580 		IEEE80211_IFACE_ITER_RESUME_ALL,
1581 		__mt76_csa_check, dev);
1582 }
1583 EXPORT_SYMBOL_GPL(mt76_csa_check);
1584 
1585 int
1586 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1587 {
1588 	return 0;
1589 }
1590 EXPORT_SYMBOL_GPL(mt76_set_tim);
1591 
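/* Re-insert the 8-byte CCMP header (PN bytes, reserved byte, ExtIV/key ID)
 * that the hardware stripped, using the PN from the RX status, so that
 * mac80211 can run its own PN checks on the frame.
 */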
1592 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1593 {
1594 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1595 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1596 	u8 *hdr, *pn = status->iv;
1597 
1598 	__skb_push(skb, 8);
1599 	memmove(skb->data, skb->data + 8, hdr_len);
1600 	hdr = skb->data + hdr_len;
1601 
1602 	hdr[0] = pn[5];
1603 	hdr[1] = pn[4];
1604 	hdr[2] = 0;
1605 	hdr[3] = 0x20 | (key_id << 6);
1606 	hdr[4] = pn[3];
1607 	hdr[5] = pn[2];
1608 	hdr[6] = pn[1];
1609 	hdr[7] = pn[0];
1610 
1611 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1612 }
1613 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1614 
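/* Map a hardware rate index to an index into the sband bitrate table. CCK
 * rates are only valid on 2 GHz and have their short preamble bit masked
 * out; OFDM lookups on the 2 GHz sband skip the four CCK entries.
 */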
1615 int mt76_get_rate(struct mt76_dev *dev,
1616 		  struct ieee80211_supported_band *sband,
1617 		  int idx, bool cck)
1618 {
1619 	int i, offset = 0, len = sband->n_bitrates;
1620 
1621 	if (cck) {
1622 		if (sband != &dev->phy.sband_2g.sband)
1623 			return 0;
1624 
1625 		idx &= ~BIT(2); /* short preamble */
1626 	} else if (sband == &dev->phy.sband_2g.sband) {
1627 		offset = 4;
1628 	}
1629 
1630 	for (i = offset; i < len; i++) {
1631 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1632 			return i;
1633 	}
1634 
1635 	return 0;
1636 }
1637 EXPORT_SYMBOL_GPL(mt76_get_rate);
1638 
1639 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1640 		  const u8 *mac)
1641 {
1642 	struct mt76_phy *phy = hw->priv;
1643 
1644 	set_bit(MT76_SCANNING, &phy->state);
1645 }
1646 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1647 
1648 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1649 {
1650 	struct mt76_phy *phy = hw->priv;
1651 
1652 	clear_bit(MT76_SCANNING, &phy->state);
1653 }
1654 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1655 
1656 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1657 {
1658 	struct mt76_phy *phy = hw->priv;
1659 	struct mt76_dev *dev = phy->dev;
1660 
1661 	mutex_lock(&dev->mutex);
1662 	*tx_ant = phy->antenna_mask;
1663 	*rx_ant = phy->antenna_mask;
1664 	mutex_unlock(&dev->mutex);
1665 
1666 	return 0;
1667 }
1668 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1669 
1670 struct mt76_queue *
1671 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1672 		int ring_base, u32 flags)
1673 {
1674 	struct mt76_queue *hwq;
1675 	int err;
1676 
1677 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1678 	if (!hwq)
1679 		return ERR_PTR(-ENOMEM);
1680 
1681 	hwq->flags = flags;
1682 
1683 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1684 	if (err < 0)
1685 		return ERR_PTR(err);
1686 
1687 	return hwq;
1688 }
1689 EXPORT_SYMBOL_GPL(mt76_init_queue);
1690 
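/* Return the hw_value of the default legacy rate for the current band,
 * skipping the CCK entries outside 2 GHz and clamping out-of-range indices
 * back to the lowest rate of the band.
 */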
1691 u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
1692 {
1693 	int offset = 0;
1694 
1695 	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
1696 		offset = 4;
1697 
1698 	/* pick the lowest rate for hidden nodes */
1699 	if (rateidx < 0)
1700 		rateidx = 0;
1701 
1702 	rateidx += offset;
1703 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1704 		rateidx = offset;
1705 
1706 	return mt76_rates[rateidx].hw_value;
1707 }
1708 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1709 
1710 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1711 			 struct mt76_sta_stats *stats, bool eht)
1712 {
1713 	int i, ei = wi->initial_stat_idx;
1714 	u64 *data = wi->data;
1715 
1716 	wi->sta_count++;
1717 
1718 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1719 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1720 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1721 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1722 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1723 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1724 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1725 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1726 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1727 	if (eht) {
1728 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1729 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1730 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1731 	}
1732 
1733 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
1734 		data[ei++] += stats->tx_bw[i];
1735 
1736 	for (i = 0; i < (eht ? 14 : 12); i++)
1737 		data[ei++] += stats->tx_mcs[i];
1738 
1739 	wi->worker_stat_count = ei - wi->initial_stat_idx;
1740 }
1741 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1742 
1743 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
1744 {
1745 #ifdef CONFIG_PAGE_POOL_STATS
1746 	struct page_pool_stats stats = {};
1747 	int i;
1748 
1749 	mt76_for_each_q_rx(dev, i)
1750 		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
1751 
1752 	page_pool_ethtool_stats_get(data, &stats);
1753 	*index += page_pool_ethtool_stats_get_count();
1754 #endif
1755 }
1756 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1757 
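/* Derive the DFS state of the current channel: disabled while scanning or
 * without a DFS region, CAC while beaconing is not yet permitted, active
 * once it is (or when monitoring a radar channel with radar detection off).
 */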
1758 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1759 {
1760 	struct ieee80211_hw *hw = phy->hw;
1761 	struct mt76_dev *dev = phy->dev;
1762 
1763 	if (dev->region == NL80211_DFS_UNSET ||
1764 	    test_bit(MT76_SCANNING, &phy->state))
1765 		return MT_DFS_STATE_DISABLED;
1766 
1767 	if (!hw->conf.radar_enabled) {
1768 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1769 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1770 			return MT_DFS_STATE_ACTIVE;
1771 
1772 		return MT_DFS_STATE_DISABLED;
1773 	}
1774 
1775 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1776 		return MT_DFS_STATE_CAC;
1777 
1778 	return MT_DFS_STATE_ACTIVE;
1779 }
1780 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1781