// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"

#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};

static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};

static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};

struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);

static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
};

const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
EXPORT_SYMBOL_GPL(mt76_sar_capa);
static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
	}

	return led_classdev_register(dev->dev, &dev->led_cdev);
}

static void mt76_led_cleanup(struct mt76_dev *dev)
{
	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return;

	led_classdev_unregister(&dev->led_cdev);
}

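/* Update stream-dependent HT/VHT capabilities (TX STBC, RX MCS masks) based
 * on the number of spatial streams implied by the PHY antenna mask.
 */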
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}

void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

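/* Populate a supported band: duplicate the constant channel list, allocate
 * per-channel survey state and fill in the common HT/VHT capabilities.
 */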
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, true, false);
}

static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, true, vht);
}

static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;

	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
			       ARRAY_SIZE(mt76_channels_6ghz), rates,
			       n_rates, false, false);
}

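/* Drop a band whose channels are all disabled (e.g. by regulatory or OF
 * frequency limits); otherwise point the default channel at its first entry.
 */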
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		phy->chandef.chan = &sband->channels[0];
		phy->chan_state = &msband->chan[0];
		return;
	}

	sband->n_channels = 0;
	phy->hw->wiphy->bands[band] = NULL;
}

static void
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = dev->phy.antenna_mask;
	wiphy->available_antennas_rx = dev->phy.antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
}

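/* Allocate an ieee80211_hw plus mt76_phy for an extra (secondary) PHY. The
 * driver private area of @size bytes follows the 8-byte aligned mt76_phy.
 */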
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);

int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	mt76_phy_init(phy, phy->hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phy2 = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);

void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phy2 = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);

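/* Allocate the primary ieee80211_hw together with the embedded
 * mt76_dev/mt76_phy pair and initialize the locks, MCU state, queues and the
 * ordered "mt76" workqueue used by the core.
 */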
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	INIT_LIST_HEAD(&dev->wcid_list);

	INIT_LIST_HEAD(&dev->txwi_cache);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

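/* Register the primary PHY with mac80211: set up the supported bands, apply
 * OF frequency limits, optionally register the LED and start the TX worker.
 */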
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_phy_init(phy, hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(dev);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);

void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

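/* Hand a completed (A-)MSDU burst to the per-queue RX skb list after
 * sanity-checking the first subframe; bogus bursts are dropped.
 */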
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}

static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}

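/* Driver RX entry point: frames are gathered into A-MSDU bursts and queued
 * per RX queue, then processed from mt76_rx_poll_complete() in NAPI context.
 */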
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

bool mt76_has_tx_pending(struct mt76_phy *phy)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		q = phy->q_tx[i];
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &phy->sband_2g;
	else if (c->band == NL80211_BAND_6GHZ)
		msband = &phy->sband_6g;
	else
		msband = &phy->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);

void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

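/* Switch the software channel state to the channel configured in hw->conf:
 * wait briefly for pending TX, update the survey counters and reset the
 * channel state when tuning away from the main operating channel.
 */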
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

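/* Seed the per-TID (plus robust management frame) CCMP PN replay counters
 * from mac80211 when a CCMP key is installed, enabling RX PN checking.
 */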
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.ext_phy);
}

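/* Compare a decrypted frame's PN against the stored counter for its TID (or
 * the robust management frame counter) and return -EINVAL on replay, so the
 * caller can drop the frame.
 */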
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}

static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}

static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

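/* Per-frame station bookkeeping on RX: resolve the wcid for PS-Poll frames,
 * account RX airtime, track RSSI and follow powersave state transitions.
 */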
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->ext_phy);
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);
}

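/* Convert queued frames to mac80211 RX status and deliver them, expanding
 * A-MSDU frag_lists; GRO is used when a napi context is provided.
 */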
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}

void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

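/* Common station add path: let the driver allocate the wcid, bind it to the
 * station's TX queues and publish it in the wcid table.
 */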
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta, bool ext_phy)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid;
	}

	ewma_signal_init(&wcid->rssi);
	if (ext_phy)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->ext_phy = ext_phy;
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_packet_id_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}

void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_packet_id_flush(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(dev, vif, sta, ext_phy);

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC &&
	    dev->drv->sta_assoc)
		dev->drv->sta_assoc(dev, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);

void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_phy *phy = hw->priv;
	int n_chains = hweight8(phy->antenna_mask);
	int delta = mt76_tx_power_nss_delta(n_chains);

	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}

void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);

static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}

void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);

int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);

void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);

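/* Map a hardware rate index back to an index into the band's bitrate table,
 * handling the CCK/OFDM split and masking the short-preamble bit for CCK.
 */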
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (sband != &dev->phy.sband_2g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->phy.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);

void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);

void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);

u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
{
	int offset = 0;
	struct ieee80211_rate *rate;

	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
		offset = 4;

	/* pick the lowest rate for hidden nodes */
	if (rateidx < 0)
		rateidx = 0;

	rate = &mt76_rates[offset + rateidx];

	return rate->hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);

void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];

	for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
		data[ei++] += stats->tx_bw[i];

	for (i = 0; i < 12; i++)
		data[ei++] += stats->tx_mcs[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);