1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
9 #define CHAN2G(_idx, _freq) {			\
10 	.band = NL80211_BAND_2GHZ,		\
11 	.center_freq = (_freq),			\
12 	.hw_value = (_idx),			\
13 	.max_power = 30,			\
14 }
15 
16 #define CHAN5G(_idx, _freq) {			\
17 	.band = NL80211_BAND_5GHZ,		\
18 	.center_freq = (_freq),			\
19 	.hw_value = (_idx),			\
20 	.max_power = 30,			\
21 }
22 
23 #define CHAN6G(_idx, _freq) {			\
24 	.band = NL80211_BAND_6GHZ,		\
25 	.center_freq = (_freq),			\
26 	.hw_value = (_idx),			\
27 	.max_power = 30,			\
28 }
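
/*
 * Note: each CHANxG() entry above expands to a plain struct ieee80211_channel
 * initializer, e.g. CHAN2G(1, 2412) is roughly equivalent to
 *
 *	{ .band = NL80211_BAND_2GHZ, .center_freq = 2412,
 *	  .hw_value = 1, .max_power = 30 }
 *
 * The regulatory core may later lower max_power and add channel flags such as
 * IEEE80211_CHAN_DISABLED for the tables below.
 */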
29 
30 static const struct ieee80211_channel mt76_channels_2ghz[] = {
31 	CHAN2G(1, 2412),
32 	CHAN2G(2, 2417),
33 	CHAN2G(3, 2422),
34 	CHAN2G(4, 2427),
35 	CHAN2G(5, 2432),
36 	CHAN2G(6, 2437),
37 	CHAN2G(7, 2442),
38 	CHAN2G(8, 2447),
39 	CHAN2G(9, 2452),
40 	CHAN2G(10, 2457),
41 	CHAN2G(11, 2462),
42 	CHAN2G(12, 2467),
43 	CHAN2G(13, 2472),
44 	CHAN2G(14, 2484),
45 };
46 
47 static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 	CHAN5G(36, 5180),
49 	CHAN5G(40, 5200),
50 	CHAN5G(44, 5220),
51 	CHAN5G(48, 5240),
52 
53 	CHAN5G(52, 5260),
54 	CHAN5G(56, 5280),
55 	CHAN5G(60, 5300),
56 	CHAN5G(64, 5320),
57 
58 	CHAN5G(100, 5500),
59 	CHAN5G(104, 5520),
60 	CHAN5G(108, 5540),
61 	CHAN5G(112, 5560),
62 	CHAN5G(116, 5580),
63 	CHAN5G(120, 5600),
64 	CHAN5G(124, 5620),
65 	CHAN5G(128, 5640),
66 	CHAN5G(132, 5660),
67 	CHAN5G(136, 5680),
68 	CHAN5G(140, 5700),
69 	CHAN5G(144, 5720),
70 
71 	CHAN5G(149, 5745),
72 	CHAN5G(153, 5765),
73 	CHAN5G(157, 5785),
74 	CHAN5G(161, 5805),
75 	CHAN5G(165, 5825),
76 	CHAN5G(169, 5845),
77 	CHAN5G(173, 5865),
78 	CHAN5G(177, 5885),
79 };
80 
81 static const struct ieee80211_channel mt76_channels_6ghz[] = {
82 	/* UNII-5 */
83 	CHAN6G(1, 5955),
84 	CHAN6G(5, 5975),
85 	CHAN6G(9, 5995),
86 	CHAN6G(13, 6015),
87 	CHAN6G(17, 6035),
88 	CHAN6G(21, 6055),
89 	CHAN6G(25, 6075),
90 	CHAN6G(29, 6095),
91 	CHAN6G(33, 6115),
92 	CHAN6G(37, 6135),
93 	CHAN6G(41, 6155),
94 	CHAN6G(45, 6175),
95 	CHAN6G(49, 6195),
96 	CHAN6G(53, 6215),
97 	CHAN6G(57, 6235),
98 	CHAN6G(61, 6255),
99 	CHAN6G(65, 6275),
100 	CHAN6G(69, 6295),
101 	CHAN6G(73, 6315),
102 	CHAN6G(77, 6335),
103 	CHAN6G(81, 6355),
104 	CHAN6G(85, 6375),
105 	CHAN6G(89, 6395),
106 	CHAN6G(93, 6415),
107 	/* UNII-6 */
108 	CHAN6G(97, 6435),
109 	CHAN6G(101, 6455),
110 	CHAN6G(105, 6475),
111 	CHAN6G(109, 6495),
112 	CHAN6G(113, 6515),
113 	CHAN6G(117, 6535),
114 	/* UNII-7 */
115 	CHAN6G(121, 6555),
116 	CHAN6G(125, 6575),
117 	CHAN6G(129, 6595),
118 	CHAN6G(133, 6615),
119 	CHAN6G(137, 6635),
120 	CHAN6G(141, 6655),
121 	CHAN6G(145, 6675),
122 	CHAN6G(149, 6695),
123 	CHAN6G(153, 6715),
124 	CHAN6G(157, 6735),
125 	CHAN6G(161, 6755),
126 	CHAN6G(165, 6775),
127 	CHAN6G(169, 6795),
128 	CHAN6G(173, 6815),
129 	CHAN6G(177, 6835),
130 	CHAN6G(181, 6855),
131 	CHAN6G(185, 6875),
132 	/* UNII-8 */
133 	CHAN6G(189, 6895),
134 	CHAN6G(193, 6915),
135 	CHAN6G(197, 6935),
136 	CHAN6G(201, 6955),
137 	CHAN6G(205, 6975),
138 	CHAN6G(209, 6995),
139 	CHAN6G(213, 7015),
140 	CHAN6G(217, 7035),
141 	CHAN6G(221, 7055),
142 	CHAN6G(225, 7075),
143 	CHAN6G(229, 7095),
144 	CHAN6G(233, 7115),
145 };
146 
147 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
148 	{ .throughput =   0 * 1024, .blink_time = 334 },
149 	{ .throughput =   1 * 1024, .blink_time = 260 },
150 	{ .throughput =   5 * 1024, .blink_time = 220 },
151 	{ .throughput =  10 * 1024, .blink_time = 190 },
152 	{ .throughput =  20 * 1024, .blink_time = 170 },
153 	{ .throughput =  50 * 1024, .blink_time = 150 },
154 	{ .throughput =  70 * 1024, .blink_time = 130 },
155 	{ .throughput = 100 * 1024, .blink_time = 110 },
156 	{ .throughput = 200 * 1024, .blink_time =  80 },
157 	{ .throughput = 300 * 1024, .blink_time =  50 },
158 };
159 
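/*
 * Common legacy rate table shared by the drivers: the first four entries are
 * the CCK rates (1/2/5.5/11 Mbit/s, .bitrate is in units of 100 kbit/s), the
 * remaining eight are the OFDM rates. The 5 GHz and 6 GHz sbands are
 * registered with rates + 4 / n_rates - 4 below so the CCK entries are
 * skipped. The CCK_RATE()/OFDM_RATE() macros (from mt76.h) also encode the
 * PHY type into the upper bits of hw_value, which mt76_get_rate() later
 * masks off when matching.
 */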
160 struct ieee80211_rate mt76_rates[] = {
161 	CCK_RATE(0, 10),
162 	CCK_RATE(1, 20),
163 	CCK_RATE(2, 55),
164 	CCK_RATE(3, 110),
165 	OFDM_RATE(11, 60),
166 	OFDM_RATE(15, 90),
167 	OFDM_RATE(10, 120),
168 	OFDM_RATE(14, 180),
169 	OFDM_RATE(9,  240),
170 	OFDM_RATE(13, 360),
171 	OFDM_RATE(8,  480),
172 	OFDM_RATE(12, 540),
173 };
174 EXPORT_SYMBOL_GPL(mt76_rates);
175 
176 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
177 	{ .start_freq = 2402, .end_freq = 2494, },
178 	{ .start_freq = 5150, .end_freq = 5350, },
179 	{ .start_freq = 5350, .end_freq = 5470, },
180 	{ .start_freq = 5470, .end_freq = 5725, },
181 	{ .start_freq = 5725, .end_freq = 5950, },
182 	{ .start_freq = 5945, .end_freq = 6165, },
183 	{ .start_freq = 6165, .end_freq = 6405, },
184 	{ .start_freq = 6405, .end_freq = 6525, },
185 	{ .start_freq = 6525, .end_freq = 6705, },
186 	{ .start_freq = 6705, .end_freq = 6865, },
187 	{ .start_freq = 6865, .end_freq = 7125, },
188 };
189 
190 static const struct cfg80211_sar_capa mt76_sar_capa = {
191 	.type = NL80211_SAR_TYPE_POWER,
192 	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
193 	.freq_ranges = &mt76_sar_freq_ranges[0],
194 };
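
/*
 * The index of each entry in mt76_sar_freq_ranges matches the corresponding
 * entry in phy->frp[], which is allocated in mt76_phy_init() and filled by
 * mt76_init_sar_power(); mt76_get_sar_power() later clamps the requested
 * tx power against these per-range limits.
 */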
195 
static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 
201 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
202 		return 0;
203 
204 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
205 		 wiphy_name(hw->wiphy));
206 
207 	phy->leds.cdev.name = phy->leds.name;
208 	phy->leds.cdev.default_trigger =
209 		ieee80211_create_tpt_led_trigger(hw,
210 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
211 					mt76_tpt_blink,
212 					ARRAY_SIZE(mt76_tpt_blink));
213 
214 	if (phy == &dev->phy) {
215 		struct device_node *np = dev->dev->of_node;
216 
217 		np = of_get_child_by_name(np, "led");
218 		if (np) {
219 			int led_pin;
220 
221 			if (!of_property_read_u32(np, "led-sources", &led_pin))
222 				phy->leds.pin = led_pin;
223 			phy->leds.al = of_property_read_bool(np,
224 							     "led-active-low");
225 			of_node_put(np);
226 		}
227 	}
228 
229 	return led_classdev_register(dev->dev, &phy->leds.cdev);
230 }
231 
static void mt76_led_cleanup(struct mt76_phy *phy)
233 {
234 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
235 		return;
236 
237 	led_classdev_unregister(&phy->leds.cdev);
238 }
239 
static void mt76_init_stream_cap(struct mt76_phy *phy,
241 				 struct ieee80211_supported_band *sband,
242 				 bool vht)
243 {
244 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
245 	int i, nstream = hweight8(phy->antenna_mask);
246 	struct ieee80211_sta_vht_cap *vht_cap;
247 	u16 mcs_map = 0;
248 
249 	if (nstream > 1)
250 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
251 	else
252 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
253 
254 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
255 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
256 
257 	if (!vht)
258 		return;
259 
260 	vht_cap = &sband->vht_cap;
261 	if (nstream > 1)
262 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
263 	else
264 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
265 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
266 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
267 
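	/*
	 * Build the VHT MCS map: two bits per spatial stream. For example,
	 * with nstream == 2 this yields 0xfffa (streams 1-2 advertise
	 * MCS 0-9, streams 3-8 are marked as not supported).
	 */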
268 	for (i = 0; i < 8; i++) {
269 		if (i < nstream)
270 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
271 		else
272 			mcs_map |=
273 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
274 	}
275 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
276 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
277 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
278 		vht_cap->vht_mcs.tx_highest |=
279 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
280 }
281 
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
283 {
284 	if (phy->cap.has_2ghz)
285 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
286 	if (phy->cap.has_5ghz)
287 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
288 	if (phy->cap.has_6ghz)
289 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
290 }
291 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
292 
293 static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
295 		const struct ieee80211_channel *chan, int n_chan,
296 		struct ieee80211_rate *rates, int n_rates,
297 		bool ht, bool vht)
298 {
299 	struct ieee80211_supported_band *sband = &msband->sband;
300 	struct ieee80211_sta_vht_cap *vht_cap;
301 	struct ieee80211_sta_ht_cap *ht_cap;
302 	struct mt76_dev *dev = phy->dev;
303 	void *chanlist;
304 	int size;
305 
306 	size = n_chan * sizeof(*chan);
307 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
308 	if (!chanlist)
309 		return -ENOMEM;
310 
311 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
312 				    GFP_KERNEL);
313 	if (!msband->chan)
314 		return -ENOMEM;
315 
316 	sband->channels = chanlist;
317 	sband->n_channels = n_chan;
318 	sband->bitrates = rates;
319 	sband->n_bitrates = n_rates;
320 
321 	if (!ht)
322 		return 0;
323 
324 	ht_cap = &sband->ht_cap;
325 	ht_cap->ht_supported = true;
326 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
327 		       IEEE80211_HT_CAP_GRN_FLD |
328 		       IEEE80211_HT_CAP_SGI_20 |
329 		       IEEE80211_HT_CAP_SGI_40 |
330 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
331 
332 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
333 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
334 
335 	mt76_init_stream_cap(phy, sband, vht);
336 
337 	if (!vht)
338 		return 0;
339 
340 	vht_cap = &sband->vht_cap;
341 	vht_cap->vht_supported = true;
342 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
343 			IEEE80211_VHT_CAP_RXSTBC_1 |
344 			IEEE80211_VHT_CAP_SHORT_GI_80 |
345 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
346 
347 	return 0;
348 }
349 
350 static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
352 		   int n_rates)
353 {
354 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
355 
356 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
357 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
358 			       n_rates, true, false);
359 }
360 
361 static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates, bool vht)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
368 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
369 			       n_rates, true, vht);
370 }
371 
372 static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
379 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
380 			       n_rates, false, false);
381 }
382 
383 static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
385 		 enum nl80211_band band)
386 {
387 	struct ieee80211_supported_band *sband = &msband->sband;
388 	bool found = false;
389 	int i;
390 
391 	if (!sband)
392 		return;
393 
394 	for (i = 0; i < sband->n_channels; i++) {
395 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
396 			continue;
397 
398 		found = true;
399 		break;
400 	}
401 
402 	if (found) {
403 		phy->chandef.chan = &sband->channels[0];
404 		phy->chan_state = &msband->chan[0];
405 		return;
406 	}
407 
408 	sband->n_channels = 0;
409 	phy->hw->wiphy->bands[band] = NULL;
410 }
411 
412 static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
414 {
415 	struct mt76_dev *dev = phy->dev;
416 	struct wiphy *wiphy = hw->wiphy;
417 
418 	INIT_LIST_HEAD(&phy->tx_list);
419 	spin_lock_init(&phy->tx_lock);
420 
421 	SET_IEEE80211_DEV(hw, dev->dev);
422 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
423 
424 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
425 			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
426 	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
427 			WIPHY_FLAG_SUPPORTS_TDLS |
428 			WIPHY_FLAG_AP_UAPSD;
429 
430 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
431 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
432 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
433 
434 	wiphy->available_antennas_tx = phy->antenna_mask;
435 	wiphy->available_antennas_rx = phy->antenna_mask;
436 
437 	wiphy->sar_capa = &mt76_sar_capa;
438 	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
439 				sizeof(struct mt76_freq_range_power),
440 				GFP_KERNEL);
441 	if (!phy->frp)
442 		return -ENOMEM;
443 
444 	hw->txq_data_size = sizeof(struct mt76_txq);
445 	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
446 
447 	if (!hw->max_tx_fragments)
448 		hw->max_tx_fragments = 16;
449 
450 	ieee80211_hw_set(hw, SIGNAL_DBM);
451 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
452 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
453 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
454 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
455 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
456 	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
457 
458 	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
459 		ieee80211_hw_set(hw, TX_AMSDU);
460 		ieee80211_hw_set(hw, TX_FRAG_LIST);
461 	}
462 
463 	ieee80211_hw_set(hw, MFP_CAPABLE);
464 	ieee80211_hw_set(hw, AP_LINK_PS);
465 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
466 
467 	return 0;
468 }
469 
470 struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
472 	       const struct ieee80211_ops *ops, u8 band_idx)
473 {
474 	struct ieee80211_hw *hw;
475 	unsigned int phy_size;
476 	struct mt76_phy *phy;
477 
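	/*
	 * Layout of hw->priv: struct mt76_phy followed (8-byte aligned) by
	 * the driver private area of the requested size, exposed through
	 * phy->priv below.
	 */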
478 	phy_size = ALIGN(sizeof(*phy), 8);
479 	hw = ieee80211_alloc_hw(size + phy_size, ops);
480 	if (!hw)
481 		return NULL;
482 
483 	phy = hw->priv;
484 	phy->dev = dev;
485 	phy->hw = hw;
486 	phy->priv = hw->priv + phy_size;
487 	phy->band_idx = band_idx;
488 
489 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
490 	hw->wiphy->interface_modes =
491 		BIT(NL80211_IFTYPE_STATION) |
492 		BIT(NL80211_IFTYPE_AP) |
493 #ifdef CONFIG_MAC80211_MESH
494 		BIT(NL80211_IFTYPE_MESH_POINT) |
495 #endif
496 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
497 		BIT(NL80211_IFTYPE_P2P_GO) |
498 		BIT(NL80211_IFTYPE_ADHOC);
499 
500 	return phy;
501 }
502 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
503 
int mt76_register_phy(struct mt76_phy *phy, bool vht,
505 		      struct ieee80211_rate *rates, int n_rates)
506 {
507 	int ret;
508 
509 	ret = mt76_phy_init(phy, phy->hw);
510 	if (ret)
511 		return ret;
512 
513 	if (phy->cap.has_2ghz) {
514 		ret = mt76_init_sband_2g(phy, rates, n_rates);
515 		if (ret)
516 			return ret;
517 	}
518 
519 	if (phy->cap.has_5ghz) {
520 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
521 		if (ret)
522 			return ret;
523 	}
524 
525 	if (phy->cap.has_6ghz) {
526 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
527 		if (ret)
528 			return ret;
529 	}
530 
531 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
532 		ret = mt76_led_init(phy);
533 		if (ret)
534 			return ret;
535 	}
536 
537 	wiphy_read_of_freq_limits(phy->hw->wiphy);
538 	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
539 	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
540 	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
541 
542 	ret = ieee80211_register_hw(phy->hw);
543 	if (ret)
544 		return ret;
545 
546 	set_bit(MT76_STATE_REGISTERED, &phy->state);
547 	phy->dev->phys[phy->band_idx] = phy;
548 
549 	return 0;
550 }
551 EXPORT_SYMBOL_GPL(mt76_register_phy);
552 
void mt76_unregister_phy(struct mt76_phy *phy)
554 {
555 	struct mt76_dev *dev = phy->dev;
556 
557 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
558 		return;
559 
560 	if (IS_ENABLED(CONFIG_MT76_LEDS))
561 		mt76_led_cleanup(phy);
562 	mt76_tx_status_check(dev, true);
563 	ieee80211_unregister_hw(phy->hw);
564 	dev->phys[phy->band_idx] = NULL;
565 }
566 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
567 
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
569 {
570 	struct page_pool_params pp_params = {
571 		.order = 0,
572 		.flags = PP_FLAG_PAGE_FRAG,
573 		.nid = NUMA_NO_NODE,
574 		.dev = dev->dma_dev,
575 	};
576 	int idx = q - dev->q_rx;
577 
578 	switch (idx) {
579 	case MT_RXQ_MAIN:
580 	case MT_RXQ_BAND1:
581 	case MT_RXQ_BAND2:
582 		pp_params.pool_size = 256;
583 		break;
584 	default:
585 		pp_params.pool_size = 16;
586 		break;
587 	}
588 
589 	if (mt76_is_mmio(dev)) {
590 		/* rely on page_pool for DMA mapping */
591 		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
592 		pp_params.dma_dir = DMA_FROM_DEVICE;
593 		pp_params.max_len = PAGE_SIZE;
594 		pp_params.offset = 0;
595 	}
596 
597 	q->page_pool = page_pool_create(&pp_params);
598 	if (IS_ERR(q->page_pool)) {
599 		int err = PTR_ERR(q->page_pool);
600 
601 		q->page_pool = NULL;
602 		return err;
603 	}
604 
605 	return 0;
606 }
607 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
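
/*
 * A rough usage sketch (not taken from a specific caller): rx buffers are
 * typically carved out of q->page_pool as page fragments, e.g.
 *
 *	struct page *page;
 *	unsigned int offset;
 *
 *	page = page_pool_dev_alloc_frag(q->page_pool, &offset, q->buf_size);
 *
 * and returned to the pool on completion. For MMIO devices the pool also
 * takes care of DMA mapping/syncing, as configured above.
 */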
608 
609 struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
611 		  const struct ieee80211_ops *ops,
612 		  const struct mt76_driver_ops *drv_ops)
613 {
614 	struct ieee80211_hw *hw;
615 	struct mt76_phy *phy;
616 	struct mt76_dev *dev;
617 	int i;
618 
619 	hw = ieee80211_alloc_hw(size, ops);
620 	if (!hw)
621 		return NULL;
622 
623 	dev = hw->priv;
624 	dev->hw = hw;
625 	dev->dev = pdev;
626 	dev->drv = drv_ops;
627 	dev->dma_dev = pdev;
628 
629 	phy = &dev->phy;
630 	phy->dev = dev;
631 	phy->hw = hw;
632 	phy->band_idx = MT_BAND0;
633 	dev->phys[phy->band_idx] = phy;
634 
635 	spin_lock_init(&dev->rx_lock);
636 	spin_lock_init(&dev->lock);
637 	spin_lock_init(&dev->cc_lock);
638 	spin_lock_init(&dev->status_lock);
639 	spin_lock_init(&dev->wed_lock);
640 	mutex_init(&dev->mutex);
641 	init_waitqueue_head(&dev->tx_wait);
642 
643 	skb_queue_head_init(&dev->mcu.res_q);
644 	init_waitqueue_head(&dev->mcu.wait);
645 	mutex_init(&dev->mcu.mutex);
646 	dev->tx_worker.fn = mt76_tx_worker;
647 
648 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
649 	hw->wiphy->interface_modes =
650 		BIT(NL80211_IFTYPE_STATION) |
651 		BIT(NL80211_IFTYPE_AP) |
652 #ifdef CONFIG_MAC80211_MESH
653 		BIT(NL80211_IFTYPE_MESH_POINT) |
654 #endif
655 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
656 		BIT(NL80211_IFTYPE_P2P_GO) |
657 		BIT(NL80211_IFTYPE_ADHOC);
658 
659 	spin_lock_init(&dev->token_lock);
660 	idr_init(&dev->token);
661 
662 	spin_lock_init(&dev->rx_token_lock);
663 	idr_init(&dev->rx_token);
664 
665 	INIT_LIST_HEAD(&dev->wcid_list);
666 	INIT_LIST_HEAD(&dev->sta_poll_list);
667 	spin_lock_init(&dev->sta_poll_lock);
668 
669 	INIT_LIST_HEAD(&dev->txwi_cache);
670 	INIT_LIST_HEAD(&dev->rxwi_cache);
671 	dev->token_size = dev->drv->token_size;
672 
673 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
674 		skb_queue_head_init(&dev->rx_skb[i]);
675 
676 	dev->wq = alloc_ordered_workqueue("mt76", 0);
677 	if (!dev->wq) {
678 		ieee80211_free_hw(hw);
679 		return NULL;
680 	}
681 
682 	return dev;
683 }
684 EXPORT_SYMBOL_GPL(mt76_alloc_device);
685 
int mt76_register_device(struct mt76_dev *dev, bool vht,
687 			 struct ieee80211_rate *rates, int n_rates)
688 {
689 	struct ieee80211_hw *hw = dev->hw;
690 	struct mt76_phy *phy = &dev->phy;
691 	int ret;
692 
693 	dev_set_drvdata(dev->dev, dev);
694 	mt76_wcid_init(&dev->global_wcid);
695 	ret = mt76_phy_init(phy, hw);
696 	if (ret)
697 		return ret;
698 
699 	if (phy->cap.has_2ghz) {
700 		ret = mt76_init_sband_2g(phy, rates, n_rates);
701 		if (ret)
702 			return ret;
703 	}
704 
705 	if (phy->cap.has_5ghz) {
706 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
707 		if (ret)
708 			return ret;
709 	}
710 
711 	if (phy->cap.has_6ghz) {
712 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
713 		if (ret)
714 			return ret;
715 	}
716 
717 	wiphy_read_of_freq_limits(hw->wiphy);
718 	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
719 	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
720 	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
721 
722 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
723 		ret = mt76_led_init(phy);
724 		if (ret)
725 			return ret;
726 	}
727 
728 	ret = ieee80211_register_hw(hw);
729 	if (ret)
730 		return ret;
731 
732 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
733 	set_bit(MT76_STATE_REGISTERED, &phy->state);
734 	sched_set_fifo_low(dev->tx_worker.task);
735 
736 	return 0;
737 }
738 EXPORT_SYMBOL_GPL(mt76_register_device);
739 
void mt76_unregister_device(struct mt76_dev *dev)
741 {
742 	struct ieee80211_hw *hw = dev->hw;
743 
744 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
745 		return;
746 
747 	if (IS_ENABLED(CONFIG_MT76_LEDS))
748 		mt76_led_cleanup(&dev->phy);
749 	mt76_tx_status_check(dev, true);
750 	mt76_wcid_cleanup(dev, &dev->global_wcid);
751 	ieee80211_unregister_hw(hw);
752 }
753 EXPORT_SYMBOL_GPL(mt76_unregister_device);
754 
void mt76_free_device(struct mt76_dev *dev)
756 {
757 	mt76_worker_teardown(&dev->tx_worker);
758 	if (dev->wq) {
759 		destroy_workqueue(dev->wq);
760 		dev->wq = NULL;
761 	}
762 	ieee80211_free_hw(dev->hw);
763 }
764 EXPORT_SYMBOL_GPL(mt76_free_device);
765 
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
767 {
768 	struct sk_buff *skb = phy->rx_amsdu[q].head;
769 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
770 	struct mt76_dev *dev = phy->dev;
771 
772 	phy->rx_amsdu[q].head = NULL;
773 	phy->rx_amsdu[q].tail = NULL;
774 
	/*
	 * Validate that the A-MSDU has a proper first subframe.
	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
	 * A-MSDU flag of the QoS header gets flipped. In that case, the first
	 * subframe has an LLC/SNAP header in the location of the destination
	 * address.
	 */
782 	if (skb_shinfo(skb)->frag_list) {
783 		int offset = 0;
784 
785 		if (!(status->flag & RX_FLAG_8023)) {
786 			offset = ieee80211_get_hdrlen_from_skb(skb);
787 
788 			if ((status->flag &
789 			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
790 			    RX_FLAG_DECRYPTED)
791 				offset += 8;
792 		}
793 
794 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
795 			dev_kfree_skb(skb);
796 			return;
797 		}
798 	}
799 	__skb_queue_tail(&dev->rx_skb[q], skb);
800 }
801 
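/*
 * Collect the subframes of a hardware-decapsulated A-MSDU into a single skb:
 * the first subframe becomes the head and later subframes are chained onto
 * its frag_list until the last subframe (or a new sequence number) triggers
 * mt76_rx_release_amsdu(), which queues the head for further processing.
 */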
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
803 				  struct sk_buff *skb)
804 {
805 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
806 
807 	if (phy->rx_amsdu[q].head &&
808 	    (!status->amsdu || status->first_amsdu ||
809 	     status->seqno != phy->rx_amsdu[q].seqno))
810 		mt76_rx_release_amsdu(phy, q);
811 
812 	if (!phy->rx_amsdu[q].head) {
813 		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
814 		phy->rx_amsdu[q].seqno = status->seqno;
815 		phy->rx_amsdu[q].head = skb;
816 	} else {
817 		*phy->rx_amsdu[q].tail = skb;
818 		phy->rx_amsdu[q].tail = &skb->next;
819 	}
820 
821 	if (!status->amsdu || status->last_amsdu)
822 		mt76_rx_release_amsdu(phy, q);
823 }
824 
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
826 {
827 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
828 	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
829 
830 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
831 		dev_kfree_skb(skb);
832 		return;
833 	}
834 
835 #ifdef CONFIG_NL80211_TESTMODE
836 	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
837 		phy->test.rx_stats.packets[q]++;
838 		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
839 			phy->test.rx_stats.fcs_error[q]++;
840 	}
841 #endif
842 
843 	mt76_rx_release_burst(phy, q, skb);
844 }
845 EXPORT_SYMBOL_GPL(mt76_rx);
846 
bool mt76_has_tx_pending(struct mt76_phy *phy)
848 {
849 	struct mt76_queue *q;
850 	int i;
851 
852 	for (i = 0; i < __MT_TXQ_MAX; i++) {
853 		q = phy->q_tx[i];
854 		if (q && q->queued)
855 			return true;
856 	}
857 
858 	return false;
859 }
860 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
861 
862 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
864 {
865 	struct mt76_sband *msband;
866 	int idx;
867 
868 	if (c->band == NL80211_BAND_2GHZ)
869 		msband = &phy->sband_2g;
870 	else if (c->band == NL80211_BAND_6GHZ)
871 		msband = &phy->sband_6g;
872 	else
873 		msband = &phy->sband_5g;
874 
875 	idx = c - &msband->sband.channels[0];
876 	return &msband->chan[idx];
877 }
878 
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
880 {
881 	struct mt76_channel_state *state = phy->chan_state;
882 
883 	state->cc_active += ktime_to_us(ktime_sub(time,
884 						  phy->survey_time));
885 	phy->survey_time = time;
886 }
887 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
888 
void mt76_update_survey(struct mt76_phy *phy)
890 {
891 	struct mt76_dev *dev = phy->dev;
892 	ktime_t cur_time;
893 
894 	if (dev->drv->update_survey)
895 		dev->drv->update_survey(phy);
896 
897 	cur_time = ktime_get_boottime();
898 	mt76_update_survey_active_time(phy, cur_time);
899 
900 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
901 		struct mt76_channel_state *state = phy->chan_state;
902 
903 		spin_lock_bh(&dev->cc_lock);
904 		state->cc_bss_rx += dev->cur_cc_bss_rx;
905 		dev->cur_cc_bss_rx = 0;
906 		spin_unlock_bh(&dev->cc_lock);
907 	}
908 }
909 EXPORT_SYMBOL_GPL(mt76_update_survey);
910 
void mt76_set_channel(struct mt76_phy *phy)
912 {
913 	struct mt76_dev *dev = phy->dev;
914 	struct ieee80211_hw *hw = phy->hw;
915 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
916 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
917 	int timeout = HZ / 5;
918 
919 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
920 	mt76_update_survey(phy);
921 
922 	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
923 	    phy->chandef.width != chandef->width)
924 		phy->dfs_state = MT_DFS_STATE_UNKNOWN;
925 
926 	phy->chandef = *chandef;
927 	phy->chan_state = mt76_channel_state(phy, chandef->chan);
928 
929 	if (!offchannel)
930 		phy->main_chan = chandef->chan;
931 
932 	if (chandef->chan != phy->main_chan)
933 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
934 }
935 EXPORT_SYMBOL_GPL(mt76_set_channel);
936 
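/*
 * Channels are exposed to cfg80211 as one flat index: 2 GHz channels first,
 * then 5 GHz, then 6 GHz, so idx is translated back into a per-band offset
 * before looking up the matching mt76_channel_state.
 */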
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
938 		    struct survey_info *survey)
939 {
940 	struct mt76_phy *phy = hw->priv;
941 	struct mt76_dev *dev = phy->dev;
942 	struct mt76_sband *sband;
943 	struct ieee80211_channel *chan;
944 	struct mt76_channel_state *state;
945 	int ret = 0;
946 
947 	mutex_lock(&dev->mutex);
948 	if (idx == 0 && dev->drv->update_survey)
949 		mt76_update_survey(phy);
950 
951 	if (idx >= phy->sband_2g.sband.n_channels +
952 		   phy->sband_5g.sband.n_channels) {
953 		idx -= (phy->sband_2g.sband.n_channels +
954 			phy->sband_5g.sband.n_channels);
955 		sband = &phy->sband_6g;
956 	} else if (idx >= phy->sband_2g.sband.n_channels) {
957 		idx -= phy->sband_2g.sband.n_channels;
958 		sband = &phy->sband_5g;
959 	} else {
960 		sband = &phy->sband_2g;
961 	}
962 
963 	if (idx >= sband->sband.n_channels) {
964 		ret = -ENOENT;
965 		goto out;
966 	}
967 
968 	chan = &sband->sband.channels[idx];
969 	state = mt76_channel_state(phy, chan);
970 
971 	memset(survey, 0, sizeof(*survey));
972 	survey->channel = chan;
973 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
974 	survey->filled |= dev->drv->survey_flags;
975 	if (state->noise)
976 		survey->filled |= SURVEY_INFO_NOISE_DBM;
977 
978 	if (chan == phy->main_chan) {
979 		survey->filled |= SURVEY_INFO_IN_USE;
980 
981 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
982 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
983 	}
984 
985 	survey->time_busy = div_u64(state->cc_busy, 1000);
986 	survey->time_rx = div_u64(state->cc_rx, 1000);
987 	survey->time = div_u64(state->cc_active, 1000);
988 	survey->noise = state->noise;
989 
990 	spin_lock_bh(&dev->cc_lock);
991 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
992 	survey->time_tx = div_u64(state->cc_tx, 1000);
993 	spin_unlock_bh(&dev->cc_lock);
994 
995 out:
996 	mutex_unlock(&dev->mutex);
997 
998 	return ret;
999 }
1000 EXPORT_SYMBOL_GPL(mt76_get_survey);
1001 
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1003 			 struct ieee80211_key_conf *key)
1004 {
1005 	struct ieee80211_key_seq seq;
1006 	int i;
1007 
1008 	wcid->rx_check_pn = false;
1009 
1010 	if (!key)
1011 		return;
1012 
1013 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1014 		return;
1015 
1016 	wcid->rx_check_pn = true;
1017 
1018 	/* data frame */
1019 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1020 		ieee80211_get_key_rx_seq(key, i, &seq);
1021 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1022 	}
1023 
1024 	/* robust management frame */
1025 	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
1029 EXPORT_SYMBOL(mt76_wcid_key_setup);
1030 
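/*
 * Combine the per-chain RSSI values into a single signal estimate using a
 * small dB-addition approximation: equal inputs gain 3 dB, inputs within
 * 2 dB gain 2 dB, and inputs within 6 dB gain 1 dB. For example, chains at
 * -60/-60 dBm combine to -57 dBm, while -60/-62 dBm combine to -58 dBm.
 */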
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1032 {
1033 	int signal = -128;
1034 	u8 chains;
1035 
1036 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1037 		int cur, diff;
1038 
1039 		cur = *chain_signal;
1040 		if (!(chains & BIT(0)) ||
1041 		    cur > 0)
1042 			continue;
1043 
1044 		if (cur > signal)
1045 			swap(cur, signal);
1046 
1047 		diff = signal - cur;
1048 		if (diff == 0)
1049 			signal += 3;
1050 		else if (diff <= 2)
1051 			signal += 2;
1052 		else if (diff <= 6)
1053 			signal += 1;
1054 	}
1055 
1056 	return signal;
1057 }
1058 EXPORT_SYMBOL(mt76_rx_signal);
1059 
1060 static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1062 		struct ieee80211_hw **hw,
1063 		struct ieee80211_sta **sta)
1064 {
1065 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1066 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1067 	struct mt76_rx_status mstat;
1068 
1069 	mstat = *((struct mt76_rx_status *)skb->cb);
1070 	memset(status, 0, sizeof(*status));
1071 
1072 	status->flag = mstat.flag;
1073 	status->freq = mstat.freq;
1074 	status->enc_flags = mstat.enc_flags;
1075 	status->encoding = mstat.encoding;
1076 	status->bw = mstat.bw;
1077 	if (status->encoding == RX_ENC_EHT) {
1078 		status->eht.ru = mstat.eht.ru;
1079 		status->eht.gi = mstat.eht.gi;
1080 	} else {
1081 		status->he_ru = mstat.he_ru;
1082 		status->he_gi = mstat.he_gi;
1083 		status->he_dcm = mstat.he_dcm;
1084 	}
1085 	status->rate_idx = mstat.rate_idx;
1086 	status->nss = mstat.nss;
1087 	status->band = mstat.band;
1088 	status->signal = mstat.signal;
1089 	status->chains = mstat.chains;
1090 	status->ampdu_reference = mstat.ampdu_ref;
1091 	status->device_timestamp = mstat.timestamp;
1092 	status->mactime = mstat.timestamp;
1093 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1094 	if (status->signal <= -128)
1095 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1096 
1097 	if (ieee80211_is_beacon(hdr->frame_control) ||
1098 	    ieee80211_is_probe_resp(hdr->frame_control))
1099 		status->boottime_ns = ktime_get_boottime_ns();
1100 
1101 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1102 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1103 		     sizeof(mstat.chain_signal));
1104 	memcpy(status->chain_signal, mstat.chain_signal,
1105 	       sizeof(mstat.chain_signal));
1106 
1107 	*sta = wcid_to_sta(mstat.wcid);
1108 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1109 }
1110 
1111 static void
mt76_check_ccmp_pn(struct sk_buff *skb)
1113 {
1114 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1115 	struct mt76_wcid *wcid = status->wcid;
1116 	struct ieee80211_hdr *hdr;
1117 	int security_idx;
1118 	int ret;
1119 
1120 	if (!(status->flag & RX_FLAG_DECRYPTED))
1121 		return;
1122 
1123 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1124 		return;
1125 
1126 	if (!wcid || !wcid->rx_check_pn)
1127 		return;
1128 
1129 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1130 	if (status->flag & RX_FLAG_8023)
1131 		goto skip_hdr_check;
1132 
1133 	hdr = mt76_skb_get_hdr(skb);
1134 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211;
		 * all further fragments will be validated by mac80211 only.
		 */
1139 		if (ieee80211_is_frag(hdr) &&
1140 		    !ieee80211_is_first_frag(hdr->frame_control))
1141 			return;
1142 	}
1143 
1144 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1145 	 *
1146 	 * the recipient shall maintain a single replay counter for received
1147 	 * individually addressed robust Management frames that are received
1148 	 * with the To DS subfield equal to 0, [...]
1149 	 */
1150 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1151 	    !ieee80211_has_tods(hdr->frame_control))
1152 		security_idx = IEEE80211_NUM_TIDS;
1153 
1154 skip_hdr_check:
1155 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1156 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1157 		     sizeof(status->iv));
1158 	if (ret <= 0) {
1159 		status->flag |= RX_FLAG_ONLY_MONITOR;
1160 		return;
1161 	}
1162 
1163 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1164 
1165 	if (status->flag & RX_FLAG_IV_STRIPPED)
1166 		status->flag |= RX_FLAG_PN_VALIDATED;
1167 }
1168 
1169 static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1171 		    int len)
1172 {
1173 	struct mt76_wcid *wcid = status->wcid;
1174 	struct ieee80211_rx_status info = {
1175 		.enc_flags = status->enc_flags,
1176 		.rate_idx = status->rate_idx,
1177 		.encoding = status->encoding,
1178 		.band = status->band,
1179 		.nss = status->nss,
1180 		.bw = status->bw,
1181 	};
1182 	struct ieee80211_sta *sta;
1183 	u32 airtime;
1184 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1185 
1186 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1187 	spin_lock(&dev->cc_lock);
1188 	dev->cur_cc_bss_rx += airtime;
1189 	spin_unlock(&dev->cc_lock);
1190 
1191 	if (!wcid || !wcid->sta)
1192 		return;
1193 
1194 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1195 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1196 }
1197 
1198 static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1200 {
1201 	struct mt76_wcid *wcid;
1202 	int wcid_idx;
1203 
1204 	if (!dev->rx_ampdu_len)
1205 		return;
1206 
1207 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1208 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1209 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1210 	else
1211 		wcid = NULL;
1212 	dev->rx_ampdu_status.wcid = wcid;
1213 
1214 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1215 
1216 	dev->rx_ampdu_len = 0;
1217 	dev->rx_ampdu_ref = 0;
1218 }
1219 
1220 static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1222 {
1223 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1224 	struct mt76_wcid *wcid = status->wcid;
1225 
1226 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1227 		return;
1228 
1229 	if (!wcid || !wcid->sta) {
1230 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1231 
1232 		if (status->flag & RX_FLAG_8023)
1233 			return;
1234 
1235 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1236 			return;
1237 
1238 		wcid = NULL;
1239 	}
1240 
1241 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1242 	    status->ampdu_ref != dev->rx_ampdu_ref)
1243 		mt76_airtime_flush_ampdu(dev);
1244 
1245 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1246 		if (!dev->rx_ampdu_len ||
1247 		    status->ampdu_ref != dev->rx_ampdu_ref) {
1248 			dev->rx_ampdu_status = *status;
1249 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1250 			dev->rx_ampdu_ref = status->ampdu_ref;
1251 		}
1252 
1253 		dev->rx_ampdu_len += skb->len;
1254 		return;
1255 	}
1256 
1257 	mt76_airtime_report(dev, status, skb->len);
1258 }
1259 
1260 static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1262 {
1263 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1264 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1265 	struct ieee80211_sta *sta;
1266 	struct ieee80211_hw *hw;
1267 	struct mt76_wcid *wcid = status->wcid;
1268 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1269 	bool ps;
1270 
1271 	hw = mt76_phy_hw(dev, status->phy_idx);
1272 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1273 	    !(status->flag & RX_FLAG_8023)) {
1274 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1275 		if (sta)
1276 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1277 	}
1278 
1279 	mt76_airtime_check(dev, skb);
1280 
1281 	if (!wcid || !wcid->sta)
1282 		return;
1283 
1284 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1285 
1286 	if (status->signal <= 0)
1287 		ewma_signal_add(&wcid->rssi, -status->signal);
1288 
1289 	wcid->inactive_count = 0;
1290 
1291 	if (status->flag & RX_FLAG_8023)
1292 		return;
1293 
1294 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1295 		return;
1296 
1297 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1298 		ieee80211_sta_pspoll(sta);
1299 		return;
1300 	}
1301 
1302 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1303 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1304 	      ieee80211_is_data(hdr->frame_control)))
1305 		return;
1306 
1307 	ps = ieee80211_has_pm(hdr->frame_control);
1308 
1309 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1310 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1311 		ieee80211_sta_uapsd_trigger(sta, tidno);
1312 
1313 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1314 		return;
1315 
1316 	if (ps)
1317 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1318 
1319 	if (dev->drv->sta_ps)
1320 		dev->drv->sta_ps(dev, sta, ps);
1321 
1322 	if (!ps)
1323 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1324 
1325 	ieee80211_sta_ps_transition(sta, ps);
1326 }
1327 
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1329 		      struct napi_struct *napi)
1330 {
1331 	struct ieee80211_sta *sta;
1332 	struct ieee80211_hw *hw;
1333 	struct sk_buff *skb, *tmp;
1334 	LIST_HEAD(list);
1335 
1336 	spin_lock(&dev->rx_lock);
1337 	while ((skb = __skb_dequeue(frames)) != NULL) {
1338 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1339 
1340 		mt76_check_ccmp_pn(skb);
1341 		skb_shinfo(skb)->frag_list = NULL;
1342 		mt76_rx_convert(dev, skb, &hw, &sta);
1343 		ieee80211_rx_list(hw, sta, skb, &list);
1344 
		/* subsequent A-MSDU subframes */
1346 		while (nskb) {
1347 			skb = nskb;
1348 			nskb = nskb->next;
1349 			skb->next = NULL;
1350 
1351 			mt76_rx_convert(dev, skb, &hw, &sta);
1352 			ieee80211_rx_list(hw, sta, skb, &list);
1353 		}
1354 	}
1355 	spin_unlock(&dev->rx_lock);
1356 
1357 	if (!napi) {
1358 		netif_receive_skb_list(&list);
1359 		return;
1360 	}
1361 
1362 	list_for_each_entry_safe(skb, tmp, &list, list) {
1363 		skb_list_del_init(skb);
1364 		napi_gro_receive(napi, skb);
1365 	}
1366 }
1367 
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1369 			   struct napi_struct *napi)
1370 {
1371 	struct sk_buff_head frames;
1372 	struct sk_buff *skb;
1373 
1374 	__skb_queue_head_init(&frames);
1375 
1376 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1377 		mt76_check_sta(dev, skb);
1378 		if (mtk_wed_device_active(&dev->mmio.wed))
1379 			__skb_queue_tail(&frames, skb);
1380 		else
1381 			mt76_rx_aggr_reorder(skb, &frames);
1382 	}
1383 
1384 	mt76_rx_complete(dev, &frames, napi);
1385 }
1386 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1387 
1388 static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1390 	     struct ieee80211_sta *sta)
1391 {
1392 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1393 	struct mt76_dev *dev = phy->dev;
1394 	int ret;
1395 	int i;
1396 
1397 	mutex_lock(&dev->mutex);
1398 
1399 	ret = dev->drv->sta_add(dev, vif, sta);
1400 	if (ret)
1401 		goto out;
1402 
1403 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1404 		struct mt76_txq *mtxq;
1405 
1406 		if (!sta->txq[i])
1407 			continue;
1408 
1409 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1410 		mtxq->wcid = wcid->idx;
1411 	}
1412 
1413 	ewma_signal_init(&wcid->rssi);
1414 	if (phy->band_idx == MT_BAND1)
1415 		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1416 	wcid->phy_idx = phy->band_idx;
1417 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1418 
1419 	mt76_wcid_init(wcid);
1420 out:
1421 	mutex_unlock(&dev->mutex);
1422 
1423 	return ret;
1424 }
1425 
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1427 		       struct ieee80211_sta *sta)
1428 {
1429 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1430 	int i, idx = wcid->idx;
1431 
1432 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1433 		mt76_rx_aggr_stop(dev, wcid, i);
1434 
1435 	if (dev->drv->sta_remove)
1436 		dev->drv->sta_remove(dev, vif, sta);
1437 
1438 	mt76_wcid_cleanup(dev, wcid);
1439 
1440 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1441 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1442 }
1443 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1444 
1445 static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1447 		struct ieee80211_sta *sta)
1448 {
1449 	mutex_lock(&dev->mutex);
1450 	__mt76_sta_remove(dev, vif, sta);
1451 	mutex_unlock(&dev->mutex);
1452 }
1453 
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1455 		   struct ieee80211_sta *sta,
1456 		   enum ieee80211_sta_state old_state,
1457 		   enum ieee80211_sta_state new_state)
1458 {
1459 	struct mt76_phy *phy = hw->priv;
1460 	struct mt76_dev *dev = phy->dev;
1461 
1462 	if (old_state == IEEE80211_STA_NOTEXIST &&
1463 	    new_state == IEEE80211_STA_NONE)
1464 		return mt76_sta_add(phy, vif, sta);
1465 
1466 	if (old_state == IEEE80211_STA_AUTH &&
1467 	    new_state == IEEE80211_STA_ASSOC &&
1468 	    dev->drv->sta_assoc)
1469 		dev->drv->sta_assoc(dev, vif, sta);
1470 
1471 	if (old_state == IEEE80211_STA_NONE &&
1472 	    new_state == IEEE80211_STA_NOTEXIST)
1473 		mt76_sta_remove(dev, vif, sta);
1474 
1475 	return 0;
1476 }
1477 EXPORT_SYMBOL_GPL(mt76_sta_state);
1478 
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1480 			     struct ieee80211_sta *sta)
1481 {
1482 	struct mt76_phy *phy = hw->priv;
1483 	struct mt76_dev *dev = phy->dev;
1484 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1485 
1486 	mutex_lock(&dev->mutex);
1487 	spin_lock_bh(&dev->status_lock);
1488 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1489 	spin_unlock_bh(&dev->status_lock);
1490 	mutex_unlock(&dev->mutex);
1491 }
1492 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1493 
void mt76_wcid_init(struct mt76_wcid *wcid)
1495 {
1496 	INIT_LIST_HEAD(&wcid->tx_list);
1497 	skb_queue_head_init(&wcid->tx_pending);
1498 
1499 	INIT_LIST_HEAD(&wcid->list);
1500 	idr_init(&wcid->pktid);
1501 }
1502 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1503 
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
1505 {
1506 	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
1507 	struct ieee80211_hw *hw;
1508 	struct sk_buff_head list;
1509 	struct sk_buff *skb;
1510 
1511 	mt76_tx_status_lock(dev, &list);
1512 	mt76_tx_status_skb_get(dev, wcid, -1, &list);
1513 	mt76_tx_status_unlock(dev, &list);
1514 
1515 	idr_destroy(&wcid->pktid);
1516 
1517 	spin_lock_bh(&phy->tx_lock);
1518 
1519 	if (!list_empty(&wcid->tx_list))
1520 		list_del_init(&wcid->tx_list);
1521 
1522 	spin_lock(&wcid->tx_pending.lock);
1523 	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
1524 	spin_unlock(&wcid->tx_pending.lock);
1525 
1526 	spin_unlock_bh(&phy->tx_lock);
1527 
1528 	while ((skb = __skb_dequeue(&list)) != NULL) {
1529 		hw = mt76_tx_status_get_hw(dev, skb);
1530 		ieee80211_free_txskb(hw, skb);
1531 	}
1532 }
1533 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1534 
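/*
 * phy->txpower_cur is kept in 0.5 dB units, hence the final division by two.
 * The NSS delta (a negative 0.5 dB-unit adjustment for multi-chain setups,
 * see mt76_tx_power_nss_delta()) compensates for the per-chain power split;
 * e.g. a hypothetical txpower_cur of 36 (18 dBm) with a -6 delta would be
 * reported as 15 dBm.
 */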
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1536 		     int *dbm)
1537 {
1538 	struct mt76_phy *phy = hw->priv;
1539 	int n_chains = hweight8(phy->antenna_mask);
1540 	int delta = mt76_tx_power_nss_delta(n_chains);
1541 
1542 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1543 
1544 	return 0;
1545 }
1546 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1547 
int mt76_init_sar_power(struct ieee80211_hw *hw,
1549 			const struct cfg80211_sar_specs *sar)
1550 {
1551 	struct mt76_phy *phy = hw->priv;
1552 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1553 	int i;
1554 
1555 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1556 		return -EINVAL;
1557 
1558 	for (i = 0; i < sar->num_sub_specs; i++) {
1559 		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in 0.25 dBm units */
1561 		s32 power = sar->sub_specs[i].power >> 1;
1562 
1563 		if (power > 127 || power < -127)
1564 			power = 127;
1565 
1566 		phy->frp[index].range = &capa->freq_ranges[index];
1567 		phy->frp[index].power = power;
1568 	}
1569 
1570 	return 0;
1571 }
1572 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1573 
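/*
 * Clamp the requested tx power against the user supplied SAR limit for the
 * frequency range that contains the channel. Both values are in 0.5 dB
 * units at this point; e.g. a SAR sub-spec of 80 (20 dBm in 0.25 dB units)
 * is stored above as 40 and would cap a request of 46 (23 dBm) down to 40.
 */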
int mt76_get_sar_power(struct mt76_phy *phy,
1575 		       struct ieee80211_channel *chan,
1576 		       int power)
1577 {
1578 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1579 	int freq, i;
1580 
1581 	if (!capa || !phy->frp)
1582 		return power;
1583 
1584 	if (power > 127 || power < -127)
1585 		power = 127;
1586 
1587 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0; i < capa->num_freq_ranges; i++) {
1589 		if (phy->frp[i].range &&
1590 		    freq >= phy->frp[i].range->start_freq &&
1591 		    freq < phy->frp[i].range->end_freq) {
1592 			power = min_t(int, phy->frp[i].power, power);
1593 			break;
1594 		}
1595 	}
1596 
1597 	return power;
1598 }
1599 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1600 
1601 static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1603 {
1604 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1605 		ieee80211_csa_finish(vif);
1606 }
1607 
void mt76_csa_finish(struct mt76_dev *dev)
1609 {
1610 	if (!dev->csa_complete)
1611 		return;
1612 
1613 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1614 		IEEE80211_IFACE_ITER_RESUME_ALL,
1615 		__mt76_csa_finish, dev);
1616 
1617 	dev->csa_complete = 0;
1618 }
1619 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1620 
1621 static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1623 {
1624 	struct mt76_dev *dev = priv;
1625 
1626 	if (!vif->bss_conf.csa_active)
1627 		return;
1628 
1629 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1630 }
1631 
void mt76_csa_check(struct mt76_dev *dev)
1633 {
1634 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1635 		IEEE80211_IFACE_ITER_RESUME_ALL,
1636 		__mt76_csa_check, dev);
1637 }
1638 EXPORT_SYMBOL_GPL(mt76_csa_check);
1639 
1640 int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1642 {
1643 	return 0;
1644 }
1645 EXPORT_SYMBOL_GPL(mt76_set_tim);
1646 
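/*
 * Re-insert the 8-byte CCMP header that the hardware stripped on rx, using
 * the PN saved in status->iv. Per IEEE 802.11 the header layout is
 * PN0 PN1 <reserved> <ExtIV | KeyID> PN2 PN3 PN4 PN5, with 0x20 being the
 * ExtIV bit; mac80211 can then perform its normal PN/replay handling.
 */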
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1648 {
1649 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1650 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1651 	u8 *hdr, *pn = status->iv;
1652 
1653 	__skb_push(skb, 8);
1654 	memmove(skb->data, skb->data + 8, hdr_len);
1655 	hdr = skb->data + hdr_len;
1656 
1657 	hdr[0] = pn[5];
1658 	hdr[1] = pn[4];
1659 	hdr[2] = 0;
1660 	hdr[3] = 0x20 | (key_id << 6);
1661 	hdr[4] = pn[3];
1662 	hdr[5] = pn[2];
1663 	hdr[6] = pn[1];
1664 	hdr[7] = pn[0];
1665 
1666 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1667 }
1668 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1669 
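/*
 * Map a hardware rate index back to an index into sband->bitrates. Only the
 * low byte of hw_value is compared, since the upper bits are used for other
 * information (e.g. the PHY type); for CCK, BIT(2) of the hardware index
 * signals short preamble and is masked out first. On failure the lowest
 * rate (index 0) is returned as a fallback.
 */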
int mt76_get_rate(struct mt76_dev *dev,
1671 		  struct ieee80211_supported_band *sband,
1672 		  int idx, bool cck)
1673 {
1674 	int i, offset = 0, len = sband->n_bitrates;
1675 
1676 	if (cck) {
1677 		if (sband != &dev->phy.sband_2g.sband)
1678 			return 0;
1679 
1680 		idx &= ~BIT(2); /* short preamble */
1681 	} else if (sband == &dev->phy.sband_2g.sband) {
1682 		offset = 4;
1683 	}
1684 
1685 	for (i = offset; i < len; i++) {
1686 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1687 			return i;
1688 	}
1689 
1690 	return 0;
1691 }
1692 EXPORT_SYMBOL_GPL(mt76_get_rate);
1693 
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1695 		  const u8 *mac)
1696 {
1697 	struct mt76_phy *phy = hw->priv;
1698 
1699 	set_bit(MT76_SCANNING, &phy->state);
1700 }
1701 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1702 
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1704 {
1705 	struct mt76_phy *phy = hw->priv;
1706 
1707 	clear_bit(MT76_SCANNING, &phy->state);
1708 }
1709 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1710 
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1712 {
1713 	struct mt76_phy *phy = hw->priv;
1714 	struct mt76_dev *dev = phy->dev;
1715 
1716 	mutex_lock(&dev->mutex);
1717 	*tx_ant = phy->antenna_mask;
1718 	*rx_ant = phy->antenna_mask;
1719 	mutex_unlock(&dev->mutex);
1720 
1721 	return 0;
1722 }
1723 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1724 
1725 struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1727 		int ring_base, u32 flags)
1728 {
1729 	struct mt76_queue *hwq;
1730 	int err;
1731 
1732 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1733 	if (!hwq)
1734 		return ERR_PTR(-ENOMEM);
1735 
1736 	hwq->flags = flags;
1737 
1738 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1739 	if (err < 0)
1740 		return ERR_PTR(err);
1741 
1742 	return hwq;
1743 }
1744 EXPORT_SYMBOL_GPL(mt76_init_queue);
1745 
u16 mt76_calculate_default_rate(struct mt76_phy *phy,
1747 				struct ieee80211_vif *vif, int rateidx)
1748 {
1749 	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1750 	struct cfg80211_chan_def *chandef = mvif->ctx ?
1751 					    &mvif->ctx->def :
1752 					    &phy->chandef;
1753 	int offset = 0;
1754 
1755 	if (chandef->chan->band != NL80211_BAND_2GHZ)
1756 		offset = 4;
1757 
1758 	/* pick the lowest rate for hidden nodes */
1759 	if (rateidx < 0)
1760 		rateidx = 0;
1761 
1762 	rateidx += offset;
1763 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1764 		rateidx = offset;
1765 
1766 	return mt76_rates[rateidx].hw_value;
1767 }
1768 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1769 
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1771 			 struct mt76_sta_stats *stats, bool eht)
1772 {
1773 	int i, ei = wi->initial_stat_idx;
1774 	u64 *data = wi->data;
1775 
1776 	wi->sta_count++;
1777 
1778 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1779 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1780 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1781 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1782 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1783 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1784 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1785 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1786 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1787 	if (eht) {
1788 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1789 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1790 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1791 	}
1792 
1793 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
1794 		data[ei++] += stats->tx_bw[i];
1795 
1796 	for (i = 0; i < (eht ? 14 : 12); i++)
1797 		data[ei++] += stats->tx_mcs[i];
1798 
1799 	for (i = 0; i < 4; i++)
1800 		data[ei++] += stats->tx_nss[i];
1801 
1802 	wi->worker_stat_count = ei - wi->initial_stat_idx;
1803 }
1804 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1805 
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
1807 {
1808 #ifdef CONFIG_PAGE_POOL_STATS
1809 	struct page_pool_stats stats = {};
1810 	int i;
1811 
1812 	mt76_for_each_q_rx(dev, i)
1813 		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
1814 
1815 	page_pool_ethtool_stats_get(data, &stats);
1816 	*index += page_pool_ethtool_stats_get_count();
1817 #endif
1818 }
1819 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1820 
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1822 {
1823 	struct ieee80211_hw *hw = phy->hw;
1824 	struct mt76_dev *dev = phy->dev;
1825 
1826 	if (dev->region == NL80211_DFS_UNSET ||
1827 	    test_bit(MT76_SCANNING, &phy->state))
1828 		return MT_DFS_STATE_DISABLED;
1829 
1830 	if (!hw->conf.radar_enabled) {
1831 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1832 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1833 			return MT_DFS_STATE_ACTIVE;
1834 
1835 		return MT_DFS_STATE_DISABLED;
1836 	}
1837 
1838 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1839 		return MT_DFS_STATE_CAC;
1840 
1841 	return MT_DFS_STATE_ACTIVE;
1842 }
1843 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1844