1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
9 #define CHAN2G(_idx, _freq) {			\
10 	.band = NL80211_BAND_2GHZ,		\
11 	.center_freq = (_freq),			\
12 	.hw_value = (_idx),			\
13 	.max_power = 30,			\
14 }
15 
16 #define CHAN5G(_idx, _freq) {			\
17 	.band = NL80211_BAND_5GHZ,		\
18 	.center_freq = (_freq),			\
19 	.hw_value = (_idx),			\
20 	.max_power = 30,			\
21 }
22 
23 #define CHAN6G(_idx, _freq) {			\
24 	.band = NL80211_BAND_6GHZ,		\
25 	.center_freq = (_freq),			\
26 	.hw_value = (_idx),			\
27 	.max_power = 30,			\
28 }
29 
30 static const struct ieee80211_channel mt76_channels_2ghz[] = {
31 	CHAN2G(1, 2412),
32 	CHAN2G(2, 2417),
33 	CHAN2G(3, 2422),
34 	CHAN2G(4, 2427),
35 	CHAN2G(5, 2432),
36 	CHAN2G(6, 2437),
37 	CHAN2G(7, 2442),
38 	CHAN2G(8, 2447),
39 	CHAN2G(9, 2452),
40 	CHAN2G(10, 2457),
41 	CHAN2G(11, 2462),
42 	CHAN2G(12, 2467),
43 	CHAN2G(13, 2472),
44 	CHAN2G(14, 2484),
45 };
46 
47 static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 	CHAN5G(36, 5180),
49 	CHAN5G(40, 5200),
50 	CHAN5G(44, 5220),
51 	CHAN5G(48, 5240),
52 
53 	CHAN5G(52, 5260),
54 	CHAN5G(56, 5280),
55 	CHAN5G(60, 5300),
56 	CHAN5G(64, 5320),
57 
58 	CHAN5G(100, 5500),
59 	CHAN5G(104, 5520),
60 	CHAN5G(108, 5540),
61 	CHAN5G(112, 5560),
62 	CHAN5G(116, 5580),
63 	CHAN5G(120, 5600),
64 	CHAN5G(124, 5620),
65 	CHAN5G(128, 5640),
66 	CHAN5G(132, 5660),
67 	CHAN5G(136, 5680),
68 	CHAN5G(140, 5700),
69 	CHAN5G(144, 5720),
70 
71 	CHAN5G(149, 5745),
72 	CHAN5G(153, 5765),
73 	CHAN5G(157, 5785),
74 	CHAN5G(161, 5805),
75 	CHAN5G(165, 5825),
76 	CHAN5G(169, 5845),
77 	CHAN5G(173, 5865),
78 };
79 
80 static const struct ieee80211_channel mt76_channels_6ghz[] = {
81 	/* UNII-5 */
82 	CHAN6G(1, 5955),
83 	CHAN6G(5, 5975),
84 	CHAN6G(9, 5995),
85 	CHAN6G(13, 6015),
86 	CHAN6G(17, 6035),
87 	CHAN6G(21, 6055),
88 	CHAN6G(25, 6075),
89 	CHAN6G(29, 6095),
90 	CHAN6G(33, 6115),
91 	CHAN6G(37, 6135),
92 	CHAN6G(41, 6155),
93 	CHAN6G(45, 6175),
94 	CHAN6G(49, 6195),
95 	CHAN6G(53, 6215),
96 	CHAN6G(57, 6235),
97 	CHAN6G(61, 6255),
98 	CHAN6G(65, 6275),
99 	CHAN6G(69, 6295),
100 	CHAN6G(73, 6315),
101 	CHAN6G(77, 6335),
102 	CHAN6G(81, 6355),
103 	CHAN6G(85, 6375),
104 	CHAN6G(89, 6395),
105 	CHAN6G(93, 6415),
106 	/* UNII-6 */
107 	CHAN6G(97, 6435),
108 	CHAN6G(101, 6455),
109 	CHAN6G(105, 6475),
110 	CHAN6G(109, 6495),
111 	CHAN6G(113, 6515),
112 	CHAN6G(117, 6535),
113 	/* UNII-7 */
114 	CHAN6G(121, 6555),
115 	CHAN6G(125, 6575),
116 	CHAN6G(129, 6595),
117 	CHAN6G(133, 6615),
118 	CHAN6G(137, 6635),
119 	CHAN6G(141, 6655),
120 	CHAN6G(145, 6675),
121 	CHAN6G(149, 6695),
122 	CHAN6G(153, 6715),
123 	CHAN6G(157, 6735),
124 	CHAN6G(161, 6755),
125 	CHAN6G(165, 6775),
126 	CHAN6G(169, 6795),
127 	CHAN6G(173, 6815),
128 	CHAN6G(177, 6835),
129 	CHAN6G(181, 6855),
130 	CHAN6G(185, 6875),
131 	/* UNII-8 */
132 	CHAN6G(189, 6895),
133 	CHAN6G(193, 6915),
134 	CHAN6G(197, 6935),
135 	CHAN6G(201, 6955),
136 	CHAN6G(205, 6975),
137 	CHAN6G(209, 6995),
138 	CHAN6G(213, 7015),
139 	CHAN6G(217, 7035),
140 	CHAN6G(221, 7055),
141 	CHAN6G(225, 7075),
142 	CHAN6G(229, 7095),
143 	CHAN6G(233, 7115),
144 };
145 
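/*
 * Throughput-based LED blink table: the throughput thresholds are in
 * kbit/s and blink_time is the blink interval in milliseconds, so the
 * LED blinks faster as throughput increases.
 */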
146 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
147 	{ .throughput =   0 * 1024, .blink_time = 334 },
148 	{ .throughput =   1 * 1024, .blink_time = 260 },
149 	{ .throughput =   5 * 1024, .blink_time = 220 },
150 	{ .throughput =  10 * 1024, .blink_time = 190 },
151 	{ .throughput =  20 * 1024, .blink_time = 170 },
152 	{ .throughput =  50 * 1024, .blink_time = 150 },
153 	{ .throughput =  70 * 1024, .blink_time = 130 },
154 	{ .throughput = 100 * 1024, .blink_time = 110 },
155 	{ .throughput = 200 * 1024, .blink_time =  80 },
156 	{ .throughput = 300 * 1024, .blink_time =  50 },
157 };
158 
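/*
 * Legacy rate table shared by the drivers: four 802.11b CCK rates
 * followed by the eight 802.11a/g OFDM rates. The first macro argument
 * is the hardware rate index, the second the bitrate in 100 kbit/s units.
 */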
159 struct ieee80211_rate mt76_rates[] = {
160 	CCK_RATE(0, 10),
161 	CCK_RATE(1, 20),
162 	CCK_RATE(2, 55),
163 	CCK_RATE(3, 110),
164 	OFDM_RATE(11, 60),
165 	OFDM_RATE(15, 90),
166 	OFDM_RATE(10, 120),
167 	OFDM_RATE(14, 180),
168 	OFDM_RATE(9,  240),
169 	OFDM_RATE(13, 360),
170 	OFDM_RATE(8,  480),
171 	OFDM_RATE(12, 540),
172 };
173 EXPORT_SYMBOL_GPL(mt76_rates);
174 
175 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
176 	{ .start_freq = 2402, .end_freq = 2494, },
177 	{ .start_freq = 5150, .end_freq = 5350, },
178 	{ .start_freq = 5350, .end_freq = 5470, },
179 	{ .start_freq = 5470, .end_freq = 5725, },
180 	{ .start_freq = 5725, .end_freq = 5950, },
181 	{ .start_freq = 5945, .end_freq = 6165, },
182 	{ .start_freq = 6165, .end_freq = 6405, },
183 	{ .start_freq = 6405, .end_freq = 6525, },
184 	{ .start_freq = 6525, .end_freq = 6705, },
185 	{ .start_freq = 6705, .end_freq = 6865, },
186 	{ .start_freq = 6865, .end_freq = 7125, },
187 };
188 
189 static const struct cfg80211_sar_capa mt76_sar_capa = {
190 	.type = NL80211_SAR_TYPE_POWER,
191 	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
192 	.freq_ranges = &mt76_sar_freq_ranges[0],
193 };
194 
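/*
 * Register an LED class device for the radio if the driver provided
 * brightness/blink callbacks. The LED pin and polarity can be overridden
 * through the "led" device tree child node.
 */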
195 static int mt76_led_init(struct mt76_dev *dev)
196 {
197 	struct device_node *np = dev->dev->of_node;
198 	struct ieee80211_hw *hw = dev->hw;
199 	int led_pin;
200 
201 	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
202 		return 0;
203 
204 	snprintf(dev->led_name, sizeof(dev->led_name),
205 		 "mt76-%s", wiphy_name(hw->wiphy));
206 
207 	dev->led_cdev.name = dev->led_name;
208 	dev->led_cdev.default_trigger =
209 		ieee80211_create_tpt_led_trigger(hw,
210 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
211 					mt76_tpt_blink,
212 					ARRAY_SIZE(mt76_tpt_blink));
213 
214 	np = of_get_child_by_name(np, "led");
215 	if (np) {
216 		if (!of_property_read_u32(np, "led-sources", &led_pin))
217 			dev->led_pin = led_pin;
218 		dev->led_al = of_property_read_bool(np, "led-active-low");
219 		of_node_put(np);
220 	}
221 
222 	return led_classdev_register(dev->dev, &dev->led_cdev);
223 }
224 
225 static void mt76_led_cleanup(struct mt76_dev *dev)
226 {
227 	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
228 		return;
229 
230 	led_classdev_unregister(&dev->led_cdev);
231 }
232 
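/*
 * Derive the HT/VHT stream capabilities (TX STBC, RX MCS maps) from the
 * number of spatial streams implied by phy->antenna_mask.
 */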
233 static void mt76_init_stream_cap(struct mt76_phy *phy,
234 				 struct ieee80211_supported_band *sband,
235 				 bool vht)
236 {
237 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
238 	int i, nstream = hweight8(phy->antenna_mask);
239 	struct ieee80211_sta_vht_cap *vht_cap;
240 	u16 mcs_map = 0;
241 
242 	if (nstream > 1)
243 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
244 	else
245 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
246 
247 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
248 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
249 
250 	if (!vht)
251 		return;
252 
253 	vht_cap = &sband->vht_cap;
254 	if (nstream > 1)
255 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
256 	else
257 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
258 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
259 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
260 
261 	for (i = 0; i < 8; i++) {
262 		if (i < nstream)
263 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
264 		else
265 			mcs_map |=
266 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
267 	}
268 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
269 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
270 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
271 		vht_cap->vht_mcs.tx_highest |=
272 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
273 }
274 
275 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
276 {
277 	if (phy->cap.has_2ghz)
278 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
279 	if (phy->cap.has_5ghz)
280 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
281 	if (phy->cap.has_6ghz)
282 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
283 }
284 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
285 
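/*
 * Set up one supported band: duplicate the constant channel list,
 * allocate the per-channel state and fill in the common HT/VHT
 * capabilities.
 */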
286 static int
287 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
288 		const struct ieee80211_channel *chan, int n_chan,
289 		struct ieee80211_rate *rates, int n_rates,
290 		bool ht, bool vht)
291 {
292 	struct ieee80211_supported_band *sband = &msband->sband;
293 	struct ieee80211_sta_vht_cap *vht_cap;
294 	struct ieee80211_sta_ht_cap *ht_cap;
295 	struct mt76_dev *dev = phy->dev;
296 	void *chanlist;
297 	int size;
298 
299 	size = n_chan * sizeof(*chan);
300 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
301 	if (!chanlist)
302 		return -ENOMEM;
303 
304 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
305 				    GFP_KERNEL);
306 	if (!msband->chan)
307 		return -ENOMEM;
308 
309 	sband->channels = chanlist;
310 	sband->n_channels = n_chan;
311 	sband->bitrates = rates;
312 	sband->n_bitrates = n_rates;
313 
314 	if (!ht)
315 		return 0;
316 
317 	ht_cap = &sband->ht_cap;
318 	ht_cap->ht_supported = true;
319 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
320 		       IEEE80211_HT_CAP_GRN_FLD |
321 		       IEEE80211_HT_CAP_SGI_20 |
322 		       IEEE80211_HT_CAP_SGI_40 |
323 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
324 
325 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
326 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
327 
328 	mt76_init_stream_cap(phy, sband, vht);
329 
330 	if (!vht)
331 		return 0;
332 
333 	vht_cap = &sband->vht_cap;
334 	vht_cap->vht_supported = true;
335 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
336 			IEEE80211_VHT_CAP_RXSTBC_1 |
337 			IEEE80211_VHT_CAP_SHORT_GI_80 |
338 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
339 
340 	return 0;
341 }
342 
343 static int
344 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
345 		   int n_rates)
346 {
347 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
348 
349 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
350 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
351 			       n_rates, true, false);
352 }
353 
354 static int
355 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
356 		   int n_rates, bool vht)
357 {
358 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
359 
360 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
361 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
362 			       n_rates, true, vht);
363 }
364 
365 static int
366 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
367 		   int n_rates)
368 {
369 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
370 
371 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
372 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
373 			       n_rates, false, false);
374 }
375 
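/*
 * After regulatory/DT limits have been applied, use the band's first
 * channel as the default if at least one channel is still enabled;
 * otherwise remove the band from the wiphy.
 */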
376 static void
377 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
378 		 enum nl80211_band band)
379 {
380 	struct ieee80211_supported_band *sband = &msband->sband;
381 	bool found = false;
382 	int i;
383 
384 	if (!sband)
385 		return;
386 
387 	for (i = 0; i < sband->n_channels; i++) {
388 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
389 			continue;
390 
391 		found = true;
392 		break;
393 	}
394 
395 	if (found) {
396 		phy->chandef.chan = &sband->channels[0];
397 		phy->chan_state = &msband->chan[0];
398 		return;
399 	}
400 
401 	sband->n_channels = 0;
402 	phy->hw->wiphy->bands[band] = NULL;
403 }
404 
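/*
 * Common ieee80211_hw/wiphy setup shared by mt76_register_device() and
 * mt76_register_phy(): feature flags, antenna masks, SAR capabilities
 * and the per-range SAR power limit table.
 */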
405 static int
406 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
407 {
408 	struct mt76_dev *dev = phy->dev;
409 	struct wiphy *wiphy = hw->wiphy;
410 
411 	SET_IEEE80211_DEV(hw, dev->dev);
412 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
413 
414 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
415 	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
416 			WIPHY_FLAG_SUPPORTS_TDLS |
417 			WIPHY_FLAG_AP_UAPSD;
418 
419 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
420 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
421 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
422 
423 	wiphy->available_antennas_tx = phy->antenna_mask;
424 	wiphy->available_antennas_rx = phy->antenna_mask;
425 
426 	wiphy->sar_capa = &mt76_sar_capa;
427 	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
428 				sizeof(struct mt76_freq_range_power),
429 				GFP_KERNEL);
430 	if (!phy->frp)
431 		return -ENOMEM;
432 
433 	hw->txq_data_size = sizeof(struct mt76_txq);
434 	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
435 
436 	if (!hw->max_tx_fragments)
437 		hw->max_tx_fragments = 16;
438 
439 	ieee80211_hw_set(hw, SIGNAL_DBM);
440 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
441 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
442 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
443 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
444 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
445 	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
446 
447 	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
448 		ieee80211_hw_set(hw, TX_AMSDU);
449 		ieee80211_hw_set(hw, TX_FRAG_LIST);
450 	}
451 
452 	ieee80211_hw_set(hw, MFP_CAPABLE);
453 	ieee80211_hw_set(hw, AP_LINK_PS);
454 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
455 
456 	return 0;
457 }
458 
459 struct mt76_phy *
460 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
461 	       const struct ieee80211_ops *ops, u8 band_idx)
462 {
463 	struct ieee80211_hw *hw;
464 	unsigned int phy_size;
465 	struct mt76_phy *phy;
466 
467 	phy_size = ALIGN(sizeof(*phy), 8);
468 	hw = ieee80211_alloc_hw(size + phy_size, ops);
469 	if (!hw)
470 		return NULL;
471 
472 	phy = hw->priv;
473 	phy->dev = dev;
474 	phy->hw = hw;
475 	phy->priv = hw->priv + phy_size;
476 	phy->band_idx = band_idx;
477 
478 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
479 	hw->wiphy->interface_modes =
480 		BIT(NL80211_IFTYPE_STATION) |
481 		BIT(NL80211_IFTYPE_AP) |
482 #ifdef CONFIG_MAC80211_MESH
483 		BIT(NL80211_IFTYPE_MESH_POINT) |
484 #endif
485 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
486 		BIT(NL80211_IFTYPE_P2P_GO) |
487 		BIT(NL80211_IFTYPE_ADHOC);
488 
489 	return phy;
490 }
491 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
492 
493 int mt76_register_phy(struct mt76_phy *phy, bool vht,
494 		      struct ieee80211_rate *rates, int n_rates)
495 {
496 	int ret;
497 
498 	ret = mt76_phy_init(phy, phy->hw);
499 	if (ret)
500 		return ret;
501 
502 	if (phy->cap.has_2ghz) {
503 		ret = mt76_init_sband_2g(phy, rates, n_rates);
504 		if (ret)
505 			return ret;
506 	}
507 
508 	if (phy->cap.has_5ghz) {
509 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
510 		if (ret)
511 			return ret;
512 	}
513 
514 	if (phy->cap.has_6ghz) {
515 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
516 		if (ret)
517 			return ret;
518 	}
519 
520 	wiphy_read_of_freq_limits(phy->hw->wiphy);
521 	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
522 	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
523 	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
524 
525 	ret = ieee80211_register_hw(phy->hw);
526 	if (ret)
527 		return ret;
528 
529 	phy->dev->phys[phy->band_idx] = phy;
530 
531 	return 0;
532 }
533 EXPORT_SYMBOL_GPL(mt76_register_phy);
534 
535 void mt76_unregister_phy(struct mt76_phy *phy)
536 {
537 	struct mt76_dev *dev = phy->dev;
538 
539 	mt76_tx_status_check(dev, true);
540 	ieee80211_unregister_hw(phy->hw);
541 	dev->phys[phy->band_idx] = NULL;
542 }
543 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
544 
545 struct mt76_dev *
546 mt76_alloc_device(struct device *pdev, unsigned int size,
547 		  const struct ieee80211_ops *ops,
548 		  const struct mt76_driver_ops *drv_ops)
549 {
550 	struct ieee80211_hw *hw;
551 	struct mt76_phy *phy;
552 	struct mt76_dev *dev;
553 	int i;
554 
555 	hw = ieee80211_alloc_hw(size, ops);
556 	if (!hw)
557 		return NULL;
558 
559 	dev = hw->priv;
560 	dev->hw = hw;
561 	dev->dev = pdev;
562 	dev->drv = drv_ops;
563 	dev->dma_dev = pdev;
564 
565 	phy = &dev->phy;
566 	phy->dev = dev;
567 	phy->hw = hw;
568 	phy->band_idx = MT_BAND0;
569 	dev->phys[phy->band_idx] = phy;
570 
571 	spin_lock_init(&dev->rx_lock);
572 	spin_lock_init(&dev->lock);
573 	spin_lock_init(&dev->cc_lock);
574 	spin_lock_init(&dev->status_lock);
575 	spin_lock_init(&dev->wed_lock);
576 	mutex_init(&dev->mutex);
577 	init_waitqueue_head(&dev->tx_wait);
578 
579 	skb_queue_head_init(&dev->mcu.res_q);
580 	init_waitqueue_head(&dev->mcu.wait);
581 	mutex_init(&dev->mcu.mutex);
582 	dev->tx_worker.fn = mt76_tx_worker;
583 
584 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
585 	hw->wiphy->interface_modes =
586 		BIT(NL80211_IFTYPE_STATION) |
587 		BIT(NL80211_IFTYPE_AP) |
588 #ifdef CONFIG_MAC80211_MESH
589 		BIT(NL80211_IFTYPE_MESH_POINT) |
590 #endif
591 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
592 		BIT(NL80211_IFTYPE_P2P_GO) |
593 		BIT(NL80211_IFTYPE_ADHOC);
594 
595 	spin_lock_init(&dev->token_lock);
596 	idr_init(&dev->token);
597 
598 	spin_lock_init(&dev->rx_token_lock);
599 	idr_init(&dev->rx_token);
600 
601 	INIT_LIST_HEAD(&dev->wcid_list);
602 
603 	INIT_LIST_HEAD(&dev->txwi_cache);
604 	INIT_LIST_HEAD(&dev->rxwi_cache);
605 	dev->token_size = dev->drv->token_size;
606 
607 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
608 		skb_queue_head_init(&dev->rx_skb[i]);
609 
610 	dev->wq = alloc_ordered_workqueue("mt76", 0);
611 	if (!dev->wq) {
612 		ieee80211_free_hw(hw);
613 		return NULL;
614 	}
615 
616 	return dev;
617 }
618 EXPORT_SYMBOL_GPL(mt76_alloc_device);
619 
620 int mt76_register_device(struct mt76_dev *dev, bool vht,
621 			 struct ieee80211_rate *rates, int n_rates)
622 {
623 	struct ieee80211_hw *hw = dev->hw;
624 	struct mt76_phy *phy = &dev->phy;
625 	int ret;
626 
627 	dev_set_drvdata(dev->dev, dev);
628 	ret = mt76_phy_init(phy, hw);
629 	if (ret)
630 		return ret;
631 
632 	if (phy->cap.has_2ghz) {
633 		ret = mt76_init_sband_2g(phy, rates, n_rates);
634 		if (ret)
635 			return ret;
636 	}
637 
638 	if (phy->cap.has_5ghz) {
639 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
640 		if (ret)
641 			return ret;
642 	}
643 
644 	if (phy->cap.has_6ghz) {
645 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
646 		if (ret)
647 			return ret;
648 	}
649 
650 	wiphy_read_of_freq_limits(hw->wiphy);
651 	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
652 	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
653 	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
654 
655 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
656 		ret = mt76_led_init(dev);
657 		if (ret)
658 			return ret;
659 	}
660 
661 	ret = ieee80211_register_hw(hw);
662 	if (ret)
663 		return ret;
664 
665 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
666 	sched_set_fifo_low(dev->tx_worker.task);
667 
668 	return 0;
669 }
670 EXPORT_SYMBOL_GPL(mt76_register_device);
671 
672 void mt76_unregister_device(struct mt76_dev *dev)
673 {
674 	struct ieee80211_hw *hw = dev->hw;
675 
676 	if (IS_ENABLED(CONFIG_MT76_LEDS))
677 		mt76_led_cleanup(dev);
678 	mt76_tx_status_check(dev, true);
679 	ieee80211_unregister_hw(hw);
680 }
681 EXPORT_SYMBOL_GPL(mt76_unregister_device);
682 
683 void mt76_free_device(struct mt76_dev *dev)
684 {
685 	mt76_worker_teardown(&dev->tx_worker);
686 	if (dev->wq) {
687 		destroy_workqueue(dev->wq);
688 		dev->wq = NULL;
689 	}
690 	ieee80211_free_hw(dev->hw);
691 }
692 EXPORT_SYMBOL_GPL(mt76_free_device);
693 
694 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
695 {
696 	struct sk_buff *skb = phy->rx_amsdu[q].head;
697 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
698 	struct mt76_dev *dev = phy->dev;
699 
700 	phy->rx_amsdu[q].head = NULL;
701 	phy->rx_amsdu[q].tail = NULL;
702 
	/*
	 * Validate that the A-MSDU has a proper first subframe.
	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
	 * A-MSDU flag in the QoS header gets flipped. In such cases, the
	 * first subframe carries an LLC/SNAP header in the location of the
	 * destination address.
	 */
710 	if (skb_shinfo(skb)->frag_list) {
711 		int offset = 0;
712 
713 		if (!(status->flag & RX_FLAG_8023)) {
714 			offset = ieee80211_get_hdrlen_from_skb(skb);
715 
716 			if ((status->flag &
717 			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
718 			    RX_FLAG_DECRYPTED)
719 				offset += 8;
720 		}
721 
722 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
723 			dev_kfree_skb(skb);
724 			return;
725 		}
726 	}
727 	__skb_queue_tail(&dev->rx_skb[q], skb);
728 }
729 
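/*
 * Collect the subframes of a hardware-decapsulated A-MSDU on a frag_list
 * and release the burst once the last subframe arrives or a frame from a
 * different A-MSDU shows up.
 */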
730 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
731 				  struct sk_buff *skb)
732 {
733 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
734 
735 	if (phy->rx_amsdu[q].head &&
736 	    (!status->amsdu || status->first_amsdu ||
737 	     status->seqno != phy->rx_amsdu[q].seqno))
738 		mt76_rx_release_amsdu(phy, q);
739 
740 	if (!phy->rx_amsdu[q].head) {
741 		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
742 		phy->rx_amsdu[q].seqno = status->seqno;
743 		phy->rx_amsdu[q].head = skb;
744 	} else {
745 		*phy->rx_amsdu[q].tail = skb;
746 		phy->rx_amsdu[q].tail = &skb->next;
747 	}
748 
749 	if (!status->amsdu || status->last_amsdu)
750 		mt76_rx_release_amsdu(phy, q);
751 }
752 
753 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
754 {
755 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
756 	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
757 
758 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
759 		dev_kfree_skb(skb);
760 		return;
761 	}
762 
763 #ifdef CONFIG_NL80211_TESTMODE
764 	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
765 		phy->test.rx_stats.packets[q]++;
766 		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
767 			phy->test.rx_stats.fcs_error[q]++;
768 	}
769 #endif
770 
771 	mt76_rx_release_burst(phy, q, skb);
772 }
773 EXPORT_SYMBOL_GPL(mt76_rx);
774 
775 bool mt76_has_tx_pending(struct mt76_phy *phy)
776 {
777 	struct mt76_queue *q;
778 	int i;
779 
780 	for (i = 0; i < __MT_TXQ_MAX; i++) {
781 		q = phy->q_tx[i];
782 		if (q && q->queued)
783 			return true;
784 	}
785 
786 	return false;
787 }
788 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
789 
790 static struct mt76_channel_state *
791 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
792 {
793 	struct mt76_sband *msband;
794 	int idx;
795 
796 	if (c->band == NL80211_BAND_2GHZ)
797 		msband = &phy->sband_2g;
798 	else if (c->band == NL80211_BAND_6GHZ)
799 		msband = &phy->sband_6g;
800 	else
801 		msband = &phy->sband_5g;
802 
803 	idx = c - &msband->sband.channels[0];
804 	return &msband->chan[idx];
805 }
806 
807 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
808 {
809 	struct mt76_channel_state *state = phy->chan_state;
810 
811 	state->cc_active += ktime_to_us(ktime_sub(time,
812 						  phy->survey_time));
813 	phy->survey_time = time;
814 }
815 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
816 
817 void mt76_update_survey(struct mt76_phy *phy)
818 {
819 	struct mt76_dev *dev = phy->dev;
820 	ktime_t cur_time;
821 
822 	if (dev->drv->update_survey)
823 		dev->drv->update_survey(phy);
824 
825 	cur_time = ktime_get_boottime();
826 	mt76_update_survey_active_time(phy, cur_time);
827 
828 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
829 		struct mt76_channel_state *state = phy->chan_state;
830 
831 		spin_lock_bh(&dev->cc_lock);
832 		state->cc_bss_rx += dev->cur_cc_bss_rx;
833 		dev->cur_cc_bss_rx = 0;
834 		spin_unlock_bh(&dev->cc_lock);
835 	}
836 }
837 EXPORT_SYMBOL_GPL(mt76_update_survey);
838 
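/*
 * Switch the phy to the channel currently configured in mac80211: wait
 * briefly for pending TX to drain, update the survey counters, reset the
 * DFS state on a channel/width change and clear the survey state when
 * tuning away from the main channel.
 */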
839 void mt76_set_channel(struct mt76_phy *phy)
840 {
841 	struct mt76_dev *dev = phy->dev;
842 	struct ieee80211_hw *hw = phy->hw;
843 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
844 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
845 	int timeout = HZ / 5;
846 
847 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
848 	mt76_update_survey(phy);
849 
850 	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
851 	    phy->chandef.width != chandef->width)
852 		phy->dfs_state = MT_DFS_STATE_UNKNOWN;
853 
854 	phy->chandef = *chandef;
855 	phy->chan_state = mt76_channel_state(phy, chandef->chan);
856 
857 	if (!offchannel)
858 		phy->main_chan = chandef->chan;
859 
860 	if (chandef->chan != phy->main_chan)
861 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
862 }
863 EXPORT_SYMBOL_GPL(mt76_set_channel);
864 
865 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
866 		    struct survey_info *survey)
867 {
868 	struct mt76_phy *phy = hw->priv;
869 	struct mt76_dev *dev = phy->dev;
870 	struct mt76_sband *sband;
871 	struct ieee80211_channel *chan;
872 	struct mt76_channel_state *state;
873 	int ret = 0;
874 
875 	mutex_lock(&dev->mutex);
876 	if (idx == 0 && dev->drv->update_survey)
877 		mt76_update_survey(phy);
878 
879 	if (idx >= phy->sband_2g.sband.n_channels +
880 		   phy->sband_5g.sband.n_channels) {
881 		idx -= (phy->sband_2g.sband.n_channels +
882 			phy->sband_5g.sband.n_channels);
883 		sband = &phy->sband_6g;
884 	} else if (idx >= phy->sband_2g.sband.n_channels) {
885 		idx -= phy->sband_2g.sband.n_channels;
886 		sband = &phy->sband_5g;
887 	} else {
888 		sband = &phy->sband_2g;
889 	}
890 
891 	if (idx >= sband->sband.n_channels) {
892 		ret = -ENOENT;
893 		goto out;
894 	}
895 
896 	chan = &sband->sband.channels[idx];
897 	state = mt76_channel_state(phy, chan);
898 
899 	memset(survey, 0, sizeof(*survey));
900 	survey->channel = chan;
901 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
902 	survey->filled |= dev->drv->survey_flags;
903 	if (state->noise)
904 		survey->filled |= SURVEY_INFO_NOISE_DBM;
905 
906 	if (chan == phy->main_chan) {
907 		survey->filled |= SURVEY_INFO_IN_USE;
908 
909 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
910 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
911 	}
912 
913 	survey->time_busy = div_u64(state->cc_busy, 1000);
914 	survey->time_rx = div_u64(state->cc_rx, 1000);
915 	survey->time = div_u64(state->cc_active, 1000);
916 	survey->noise = state->noise;
917 
918 	spin_lock_bh(&dev->cc_lock);
919 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
920 	survey->time_tx = div_u64(state->cc_tx, 1000);
921 	spin_unlock_bh(&dev->cc_lock);
922 
923 out:
924 	mutex_unlock(&dev->mutex);
925 
926 	return ret;
927 }
928 EXPORT_SYMBOL_GPL(mt76_get_survey);
929 
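/*
 * Seed the per-TID and management frame CCMP PN replay counters from
 * mac80211 so that mt76_check_ccmp_pn() can detect replays for frames
 * decrypted in hardware.
 */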
930 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
931 			 struct ieee80211_key_conf *key)
932 {
933 	struct ieee80211_key_seq seq;
934 	int i;
935 
936 	wcid->rx_check_pn = false;
937 
938 	if (!key)
939 		return;
940 
941 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
942 		return;
943 
944 	wcid->rx_check_pn = true;
945 
946 	/* data frame */
947 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
948 		ieee80211_get_key_rx_seq(key, i, &seq);
949 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
950 	}
951 
952 	/* robust management frame */
953 	ieee80211_get_key_rx_seq(key, -1, &seq);
954 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
956 }
957 EXPORT_SYMBOL(mt76_wcid_key_setup);
958 
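/*
 * Combine the per-chain RSSI values into a single signal estimate: start
 * from the strongest chain and add up to 3 dB depending on how close the
 * weaker chains are. For example, chains at -60 and -62 dBm combine to
 * roughly -58 dBm.
 */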
959 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
960 {
961 	int signal = -128;
962 	u8 chains;
963 
964 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
965 		int cur, diff;
966 
967 		cur = *chain_signal;
968 		if (!(chains & BIT(0)) ||
969 		    cur > 0)
970 			continue;
971 
972 		if (cur > signal)
973 			swap(cur, signal);
974 
975 		diff = signal - cur;
976 		if (diff == 0)
977 			signal += 3;
978 		else if (diff <= 2)
979 			signal += 2;
980 		else if (diff <= 6)
981 			signal += 1;
982 	}
983 
984 	return signal;
985 }
986 EXPORT_SYMBOL(mt76_rx_signal);
987 
988 static void
989 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
990 		struct ieee80211_hw **hw,
991 		struct ieee80211_sta **sta)
992 {
993 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
994 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
995 	struct mt76_rx_status mstat;
996 
997 	mstat = *((struct mt76_rx_status *)skb->cb);
998 	memset(status, 0, sizeof(*status));
999 
1000 	status->flag = mstat.flag;
1001 	status->freq = mstat.freq;
1002 	status->enc_flags = mstat.enc_flags;
1003 	status->encoding = mstat.encoding;
1004 	status->bw = mstat.bw;
1005 	status->he_ru = mstat.he_ru;
1006 	status->he_gi = mstat.he_gi;
1007 	status->he_dcm = mstat.he_dcm;
1008 	status->rate_idx = mstat.rate_idx;
1009 	status->nss = mstat.nss;
1010 	status->band = mstat.band;
1011 	status->signal = mstat.signal;
1012 	status->chains = mstat.chains;
1013 	status->ampdu_reference = mstat.ampdu_ref;
1014 	status->device_timestamp = mstat.timestamp;
1015 	status->mactime = mstat.timestamp;
1016 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1017 	if (status->signal <= -128)
1018 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1019 
1020 	if (ieee80211_is_beacon(hdr->frame_control) ||
1021 	    ieee80211_is_probe_resp(hdr->frame_control))
1022 		status->boottime_ns = ktime_get_boottime_ns();
1023 
1024 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1025 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1026 		     sizeof(mstat.chain_signal));
1027 	memcpy(status->chain_signal, mstat.chain_signal,
1028 	       sizeof(mstat.chain_signal));
1029 
1030 	*sta = wcid_to_sta(mstat.wcid);
1031 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1032 }
1033 
1034 static void
1035 mt76_check_ccmp_pn(struct sk_buff *skb)
1036 {
1037 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1038 	struct mt76_wcid *wcid = status->wcid;
1039 	struct ieee80211_hdr *hdr;
1040 	int security_idx;
1041 	int ret;
1042 
1043 	if (!(status->flag & RX_FLAG_DECRYPTED))
1044 		return;
1045 
1046 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1047 		return;
1048 
1049 	if (!wcid || !wcid->rx_check_pn)
1050 		return;
1051 
1052 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1053 	if (status->flag & RX_FLAG_8023)
1054 		goto skip_hdr_check;
1055 
1056 	hdr = mt76_skb_get_hdr(skb);
1057 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211;
		 * all further fragments are validated by mac80211 only.
		 */
1062 		if (ieee80211_is_frag(hdr) &&
1063 		    !ieee80211_is_first_frag(hdr->frame_control))
1064 			return;
1065 	}
1066 
1067 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1068 	 *
1069 	 * the recipient shall maintain a single replay counter for received
1070 	 * individually addressed robust Management frames that are received
1071 	 * with the To DS subfield equal to 0, [...]
1072 	 */
1073 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1074 	    !ieee80211_has_tods(hdr->frame_control))
1075 		security_idx = IEEE80211_NUM_TIDS;
1076 
1077 skip_hdr_check:
1078 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1079 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1080 		     sizeof(status->iv));
1081 	if (ret <= 0) {
1082 		status->flag |= RX_FLAG_ONLY_MONITOR;
1083 		return;
1084 	}
1085 
1086 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1087 
1088 	if (status->flag & RX_FLAG_IV_STRIPPED)
1089 		status->flag |= RX_FLAG_PN_VALIDATED;
1090 }
1091 
1092 static void
1093 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1094 		    int len)
1095 {
1096 	struct mt76_wcid *wcid = status->wcid;
1097 	struct ieee80211_rx_status info = {
1098 		.enc_flags = status->enc_flags,
1099 		.rate_idx = status->rate_idx,
1100 		.encoding = status->encoding,
1101 		.band = status->band,
1102 		.nss = status->nss,
1103 		.bw = status->bw,
1104 	};
1105 	struct ieee80211_sta *sta;
1106 	u32 airtime;
1107 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1108 
1109 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1110 	spin_lock(&dev->cc_lock);
1111 	dev->cur_cc_bss_rx += airtime;
1112 	spin_unlock(&dev->cc_lock);
1113 
1114 	if (!wcid || !wcid->sta)
1115 		return;
1116 
1117 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1118 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1119 }
1120 
1121 static void
1122 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1123 {
1124 	struct mt76_wcid *wcid;
1125 	int wcid_idx;
1126 
1127 	if (!dev->rx_ampdu_len)
1128 		return;
1129 
1130 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1131 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1132 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1133 	else
1134 		wcid = NULL;
1135 	dev->rx_ampdu_status.wcid = wcid;
1136 
1137 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1138 
1139 	dev->rx_ampdu_len = 0;
1140 	dev->rx_ampdu_ref = 0;
1141 }
1142 
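/*
 * Software RX airtime accounting: frames that belong to the same A-MPDU
 * are accumulated and reported in one go once a new A-MPDU (or a frame
 * without A-MPDU details) is seen.
 */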
1143 static void
1144 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1145 {
1146 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1147 	struct mt76_wcid *wcid = status->wcid;
1148 
1149 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1150 		return;
1151 
1152 	if (!wcid || !wcid->sta) {
1153 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1154 
1155 		if (status->flag & RX_FLAG_8023)
1156 			return;
1157 
1158 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1159 			return;
1160 
1161 		wcid = NULL;
1162 	}
1163 
1164 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1165 	    status->ampdu_ref != dev->rx_ampdu_ref)
1166 		mt76_airtime_flush_ampdu(dev);
1167 
1168 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1169 		if (!dev->rx_ampdu_len ||
1170 		    status->ampdu_ref != dev->rx_ampdu_ref) {
1171 			dev->rx_ampdu_status = *status;
1172 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1173 			dev->rx_ampdu_ref = status->ampdu_ref;
1174 		}
1175 
1176 		dev->rx_ampdu_len += skb->len;
1177 		return;
1178 	}
1179 
1180 	mt76_airtime_report(dev, status, skb->len);
1181 }
1182 
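/*
 * Per-frame RX bookkeeping for the transmitting station: airtime
 * accounting, RSSI averaging and powersave/U-APSD state transitions
 * driven by the PM bit and PS-Poll/trigger frames.
 */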
1183 static void
1184 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1185 {
1186 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1187 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1188 	struct ieee80211_sta *sta;
1189 	struct ieee80211_hw *hw;
1190 	struct mt76_wcid *wcid = status->wcid;
1191 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1192 	bool ps;
1193 
1194 	hw = mt76_phy_hw(dev, status->phy_idx);
1195 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1196 	    !(status->flag & RX_FLAG_8023)) {
1197 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1198 		if (sta)
1199 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1200 	}
1201 
1202 	mt76_airtime_check(dev, skb);
1203 
1204 	if (!wcid || !wcid->sta)
1205 		return;
1206 
1207 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1208 
1209 	if (status->signal <= 0)
1210 		ewma_signal_add(&wcid->rssi, -status->signal);
1211 
1212 	wcid->inactive_count = 0;
1213 
1214 	if (status->flag & RX_FLAG_8023)
1215 		return;
1216 
1217 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1218 		return;
1219 
1220 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1221 		ieee80211_sta_pspoll(sta);
1222 		return;
1223 	}
1224 
1225 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1226 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1227 	      ieee80211_is_data(hdr->frame_control)))
1228 		return;
1229 
1230 	ps = ieee80211_has_pm(hdr->frame_control);
1231 
1232 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1233 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1234 		ieee80211_sta_uapsd_trigger(sta, tidno);
1235 
1236 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1237 		return;
1238 
1239 	if (ps)
1240 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1241 
1242 	dev->drv->sta_ps(dev, sta, ps);
1243 
1244 	if (!ps)
1245 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1246 
1247 	ieee80211_sta_ps_transition(sta, ps);
1248 }
1249 
1250 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1251 		      struct napi_struct *napi)
1252 {
1253 	struct ieee80211_sta *sta;
1254 	struct ieee80211_hw *hw;
1255 	struct sk_buff *skb, *tmp;
1256 	LIST_HEAD(list);
1257 
1258 	spin_lock(&dev->rx_lock);
1259 	while ((skb = __skb_dequeue(frames)) != NULL) {
1260 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1261 
1262 		mt76_check_ccmp_pn(skb);
1263 		skb_shinfo(skb)->frag_list = NULL;
1264 		mt76_rx_convert(dev, skb, &hw, &sta);
1265 		ieee80211_rx_list(hw, sta, skb, &list);
1266 
		/* subsequent A-MSDU subframes */
1268 		while (nskb) {
1269 			skb = nskb;
1270 			nskb = nskb->next;
1271 			skb->next = NULL;
1272 
1273 			mt76_rx_convert(dev, skb, &hw, &sta);
1274 			ieee80211_rx_list(hw, sta, skb, &list);
1275 		}
1276 	}
1277 	spin_unlock(&dev->rx_lock);
1278 
1279 	if (!napi) {
1280 		netif_receive_skb_list(&list);
1281 		return;
1282 	}
1283 
1284 	list_for_each_entry_safe(skb, tmp, &list, list) {
1285 		skb_list_del_init(skb);
1286 		napi_gro_receive(napi, skb);
1287 	}
1288 }
1289 
1290 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1291 			   struct napi_struct *napi)
1292 {
1293 	struct sk_buff_head frames;
1294 	struct sk_buff *skb;
1295 
1296 	__skb_queue_head_init(&frames);
1297 
1298 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1299 		mt76_check_sta(dev, skb);
1300 		if (mtk_wed_device_active(&dev->mmio.wed))
1301 			__skb_queue_tail(&frames, skb);
1302 		else
1303 			mt76_rx_aggr_reorder(skb, &frames);
1304 	}
1305 
1306 	mt76_rx_complete(dev, &frames, napi);
1307 }
1308 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1309 
1310 static int
1311 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1312 	     struct ieee80211_sta *sta)
1313 {
1314 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1315 	struct mt76_dev *dev = phy->dev;
1316 	int ret;
1317 	int i;
1318 
1319 	mutex_lock(&dev->mutex);
1320 
1321 	ret = dev->drv->sta_add(dev, vif, sta);
1322 	if (ret)
1323 		goto out;
1324 
1325 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1326 		struct mt76_txq *mtxq;
1327 
1328 		if (!sta->txq[i])
1329 			continue;
1330 
1331 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1332 		mtxq->wcid = wcid->idx;
1333 	}
1334 
1335 	ewma_signal_init(&wcid->rssi);
1336 	if (phy->band_idx == MT_BAND1)
1337 		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1338 	wcid->phy_idx = phy->band_idx;
1339 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1340 
1341 	mt76_packet_id_init(wcid);
1342 out:
1343 	mutex_unlock(&dev->mutex);
1344 
1345 	return ret;
1346 }
1347 
1348 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1349 		       struct ieee80211_sta *sta)
1350 {
1351 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1352 	int i, idx = wcid->idx;
1353 
1354 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1355 		mt76_rx_aggr_stop(dev, wcid, i);
1356 
1357 	if (dev->drv->sta_remove)
1358 		dev->drv->sta_remove(dev, vif, sta);
1359 
1360 	mt76_packet_id_flush(dev, wcid);
1361 
1362 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1363 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1364 }
1365 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1366 
1367 static void
1368 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1369 		struct ieee80211_sta *sta)
1370 {
1371 	mutex_lock(&dev->mutex);
1372 	__mt76_sta_remove(dev, vif, sta);
1373 	mutex_unlock(&dev->mutex);
1374 }
1375 
1376 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1377 		   struct ieee80211_sta *sta,
1378 		   enum ieee80211_sta_state old_state,
1379 		   enum ieee80211_sta_state new_state)
1380 {
1381 	struct mt76_phy *phy = hw->priv;
1382 	struct mt76_dev *dev = phy->dev;
1383 
1384 	if (old_state == IEEE80211_STA_NOTEXIST &&
1385 	    new_state == IEEE80211_STA_NONE)
1386 		return mt76_sta_add(phy, vif, sta);
1387 
1388 	if (old_state == IEEE80211_STA_AUTH &&
1389 	    new_state == IEEE80211_STA_ASSOC &&
1390 	    dev->drv->sta_assoc)
1391 		dev->drv->sta_assoc(dev, vif, sta);
1392 
1393 	if (old_state == IEEE80211_STA_NONE &&
1394 	    new_state == IEEE80211_STA_NOTEXIST)
1395 		mt76_sta_remove(dev, vif, sta);
1396 
1397 	return 0;
1398 }
1399 EXPORT_SYMBOL_GPL(mt76_sta_state);
1400 
1401 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1402 			     struct ieee80211_sta *sta)
1403 {
1404 	struct mt76_phy *phy = hw->priv;
1405 	struct mt76_dev *dev = phy->dev;
1406 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1407 
1408 	mutex_lock(&dev->mutex);
1409 	spin_lock_bh(&dev->status_lock);
1410 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1411 	spin_unlock_bh(&dev->status_lock);
1412 	mutex_unlock(&dev->mutex);
1413 }
1414 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1415 
1416 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1417 		     int *dbm)
1418 {
1419 	struct mt76_phy *phy = hw->priv;
1420 	int n_chains = hweight8(phy->antenna_mask);
1421 	int delta = mt76_tx_power_nss_delta(n_chains);
1422 
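	/* txpower_cur and the per-stream delta are in 0.5 dBm units */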
1423 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1424 
1425 	return 0;
1426 }
1427 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1428 
1429 int mt76_init_sar_power(struct ieee80211_hw *hw,
1430 			const struct cfg80211_sar_specs *sar)
1431 {
1432 	struct mt76_phy *phy = hw->priv;
1433 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1434 	int i;
1435 
1436 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1437 		return -EINVAL;
1438 
1439 	for (i = 0; i < sar->num_sub_specs; i++) {
1440 		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR power limits are specified in 0.25 dBm steps;
		 * halve them to get the 0.5 dBm units used internally.
		 */
		s32 power = sar->sub_specs[i].power >> 1;
1443 
1444 		if (power > 127 || power < -127)
1445 			power = 127;
1446 
1447 		phy->frp[index].range = &capa->freq_ranges[index];
1448 		phy->frp[index].power = power;
1449 	}
1450 
1451 	return 0;
1452 }
1453 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1454 
1455 int mt76_get_sar_power(struct mt76_phy *phy,
1456 		       struct ieee80211_channel *chan,
1457 		       int power)
1458 {
1459 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1460 	int freq, i;
1461 
1462 	if (!capa || !phy->frp)
1463 		return power;
1464 
1465 	if (power > 127 || power < -127)
1466 		power = 127;
1467 
1468 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0; i < capa->num_freq_ranges; i++) {
1470 		if (phy->frp[i].range &&
1471 		    freq >= phy->frp[i].range->start_freq &&
1472 		    freq < phy->frp[i].range->end_freq) {
1473 			power = min_t(int, phy->frp[i].power, power);
1474 			break;
1475 		}
1476 	}
1477 
1478 	return power;
1479 }
1480 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1481 
1482 static void
1483 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1484 {
1485 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1486 		ieee80211_csa_finish(vif);
1487 }
1488 
1489 void mt76_csa_finish(struct mt76_dev *dev)
1490 {
1491 	if (!dev->csa_complete)
1492 		return;
1493 
1494 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1495 		IEEE80211_IFACE_ITER_RESUME_ALL,
1496 		__mt76_csa_finish, dev);
1497 
1498 	dev->csa_complete = 0;
1499 }
1500 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1501 
1502 static void
1503 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1504 {
1505 	struct mt76_dev *dev = priv;
1506 
1507 	if (!vif->bss_conf.csa_active)
1508 		return;
1509 
1510 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1511 }
1512 
1513 void mt76_csa_check(struct mt76_dev *dev)
1514 {
1515 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1516 		IEEE80211_IFACE_ITER_RESUME_ALL,
1517 		__mt76_csa_check, dev);
1518 }
1519 EXPORT_SYMBOL_GPL(mt76_csa_check);
1520 
1521 int
1522 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1523 {
1524 	return 0;
1525 }
1526 EXPORT_SYMBOL_GPL(mt76_set_tim);
1527 
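/*
 * Rebuild the 8-byte CCMP header (PN and key id) that the hardware
 * stripped on RX, so that mac80211 sees a complete frame again.
 */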
1528 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1529 {
1530 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1531 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1532 	u8 *hdr, *pn = status->iv;
1533 
1534 	__skb_push(skb, 8);
1535 	memmove(skb->data, skb->data + 8, hdr_len);
1536 	hdr = skb->data + hdr_len;
1537 
1538 	hdr[0] = pn[5];
1539 	hdr[1] = pn[4];
1540 	hdr[2] = 0;
1541 	hdr[3] = 0x20 | (key_id << 6);
1542 	hdr[4] = pn[3];
1543 	hdr[5] = pn[2];
1544 	hdr[6] = pn[1];
1545 	hdr[7] = pn[0];
1546 
1547 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1548 }
1549 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1550 
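/*
 * Map a hardware rate index to an index into the band's bitrate table.
 * CCK rates only exist on 2 GHz; on that band the OFDM entries start
 * after the four CCK rates.
 */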
1551 int mt76_get_rate(struct mt76_dev *dev,
1552 		  struct ieee80211_supported_band *sband,
1553 		  int idx, bool cck)
1554 {
1555 	int i, offset = 0, len = sband->n_bitrates;
1556 
1557 	if (cck) {
1558 		if (sband != &dev->phy.sband_2g.sband)
1559 			return 0;
1560 
1561 		idx &= ~BIT(2); /* short preamble */
1562 	} else if (sband == &dev->phy.sband_2g.sband) {
1563 		offset = 4;
1564 	}
1565 
1566 	for (i = offset; i < len; i++) {
1567 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1568 			return i;
1569 	}
1570 
1571 	return 0;
1572 }
1573 EXPORT_SYMBOL_GPL(mt76_get_rate);
1574 
1575 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1576 		  const u8 *mac)
1577 {
1578 	struct mt76_phy *phy = hw->priv;
1579 
1580 	set_bit(MT76_SCANNING, &phy->state);
1581 }
1582 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1583 
1584 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1585 {
1586 	struct mt76_phy *phy = hw->priv;
1587 
1588 	clear_bit(MT76_SCANNING, &phy->state);
1589 }
1590 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1591 
1592 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1593 {
1594 	struct mt76_phy *phy = hw->priv;
1595 	struct mt76_dev *dev = phy->dev;
1596 
1597 	mutex_lock(&dev->mutex);
1598 	*tx_ant = phy->antenna_mask;
1599 	*rx_ant = phy->antenna_mask;
1600 	mutex_unlock(&dev->mutex);
1601 
1602 	return 0;
1603 }
1604 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1605 
1606 struct mt76_queue *
1607 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1608 		int ring_base, u32 flags)
1609 {
1610 	struct mt76_queue *hwq;
1611 	int err;
1612 
1613 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1614 	if (!hwq)
1615 		return ERR_PTR(-ENOMEM);
1616 
1617 	hwq->flags = flags;
1618 
1619 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1620 	if (err < 0)
1621 		return ERR_PTR(err);
1622 
1623 	return hwq;
1624 }
1625 EXPORT_SYMBOL_GPL(mt76_init_queue);
1626 
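/*
 * Pick the default (lowest usable) legacy rate for the current band:
 * outside of 2 GHz the CCK entries at the start of mt76_rates are
 * skipped.
 */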
1627 u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
1628 {
1629 	int offset = 0;
1630 
1631 	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
1632 		offset = 4;
1633 
1634 	/* pick the lowest rate for hidden nodes */
1635 	if (rateidx < 0)
1636 		rateidx = 0;
1637 
1638 	rateidx += offset;
1639 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1640 		rateidx = offset;
1641 
1642 	return mt76_rates[rateidx].hw_value;
1643 }
1644 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1645 
1646 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1647 			 struct mt76_sta_stats *stats)
1648 {
1649 	int i, ei = wi->initial_stat_idx;
1650 	u64 *data = wi->data;
1651 
1652 	wi->sta_count++;
1653 
1654 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1655 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1656 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1657 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1658 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1659 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1660 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1661 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1662 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1663 
1664 	for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
1665 		data[ei++] += stats->tx_bw[i];
1666 
1667 	for (i = 0; i < 12; i++)
1668 		data[ei++] += stats->tx_mcs[i];
1669 
1670 	wi->worker_stat_count = ei - wi->initial_stat_idx;
1671 }
1672 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1673 
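/*
 * Derive the current DFS state: disabled while scanning or without a DFS
 * region, CAC while beaconing is not yet permitted on a radar channel,
 * active otherwise (including monitor mode on radar channels).
 */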
1674 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1675 {
1676 	struct ieee80211_hw *hw = phy->hw;
1677 	struct mt76_dev *dev = phy->dev;
1678 
1679 	if (dev->region == NL80211_DFS_UNSET ||
1680 	    test_bit(MT76_SCANNING, &phy->state))
1681 		return MT_DFS_STATE_DISABLED;
1682 
1683 	if (!hw->conf.radar_enabled) {
1684 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1685 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1686 			return MT_DFS_STATE_ACTIVE;
1687 
1688 		return MT_DFS_STATE_DISABLED;
1689 	}
1690 
1691 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1692 		return MT_DFS_STATE_CAC;
1693 
1694 	return MT_DFS_STATE_ACTIVE;
1695 }
1696 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1697