// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/of.h>
#include "mt76.h"

#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
};

static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};

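/*
 * Register an LED class device for the radio if the driver supplied
 * brightness_set/blink_set callbacks.  The default trigger blinks faster
 * as throughput rises (see mt76_tpt_blink above).  An optional "led"
 * child node in the device tree may override the LED pin ("led-sources")
 * and polarity ("led-active-low").
 */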
static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
	}

	return led_classdev_register(dev->dev, &dev->led_cdev);
}

static void mt76_led_cleanup(struct mt76_dev *dev)
{
	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return;

	led_classdev_unregister(&dev->led_cdev);
}

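/*
 * Derive stream-dependent HT/VHT capabilities from the antenna mask:
 * TX STBC is only advertised with more than one spatial stream, and one
 * HT RX MCS set (or VHT MCS 0-9 map entry) is enabled per stream.
 */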
static void mt76_init_stream_cap(struct mt76_dev *dev,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(dev->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}

void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
{
	if (dev->cap.has_2ghz)
		mt76_init_stream_cap(dev, &dev->sband_2g.sband, false);
	if (dev->cap.has_5ghz)
		mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

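/*
 * Common band setup: duplicate the constant channel template (so channel
 * flags can be adjusted per device), allocate matching per-channel survey
 * state and fill in the baseline HT, and optionally VHT, capabilities.
 */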
static int
mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;
	dev->chandef.chan = &sband->channels[0];
	dev->chan_state = &msband->chan[0];

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

	mt76_init_stream_cap(dev, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

static int
mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
		   int n_rates)
{
	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;

	return mt76_init_sband(dev, &dev->sband_2g,
			       mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz),
			       rates, n_rates, false);
}

static int
mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;

	return mt76_init_sband(dev, &dev->sband_5g,
			       mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz),
			       rates, n_rates, vht);
}

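/*
 * After regulatory/OF frequency limits have been applied, drop a band
 * entirely if none of its channels is left enabled.
 */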
static void
mt76_check_sband(struct mt76_dev *dev, int band)
{
	struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found)
		return;

	sband->n_channels = 0;
	dev->hw->wiphy->bands[band] = NULL;
}

struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);
	skb_queue_head_init(&dev->status_list);

	tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

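/*
 * Advertise the common mt76 feature set to mac80211, set up the bands the
 * hardware reports as supported, apply device tree frequency limits and
 * finally register the hardware.  The rate table is shared between bands;
 * the 5 GHz band skips its first four entries (presumably the CCK rates,
 * see also mt76_get_rate() below).
 */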
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct wiphy *wiphy = hw->wiphy;
	int ret;

	dev_set_drvdata(dev->dev, dev);

	INIT_LIST_HEAD(&dev->txwi_cache);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);

	wiphy->available_antennas_tx = dev->antenna_mask;
	wiphy->available_antennas_rx = dev->antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_ADHOC);

	if (dev->cap.has_2ghz) {
		ret = mt76_init_sband_2g(dev, rates, n_rates);
		if (ret)
			return ret;
	}

	if (dev->cap.has_5ghz) {
		ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(dev->hw->wiphy);
	mt76_check_sband(dev, NL80211_BAND_2GHZ);
	mt76_check_sband(dev, NL80211_BAND_5GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(dev);
		if (ret)
			return ret;
	}

	return ieee80211_register_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_register_device);

void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	mt76_led_cleanup(dev);
	mt76_tx_status_check(dev, NULL, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

void mt76_free_device(struct mt76_dev *dev)
{
	mt76_tx_free(dev);
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
		dev_kfree_skb(skb);
		return;
	}

	__skb_queue_tail(&dev->rx_skb[q], skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

bool mt76_has_tx_pending(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
		q = dev->q_tx[i].q;
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

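/*
 * Look up the per-device survey state for a channel.  The channel arrays
 * are device-local copies, so the pointer offset into the band's channel
 * list doubles as the index into msband->chan.
 */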
static struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

void mt76_update_survey(struct mt76_dev *dev)
{
	struct mt76_channel_state *state = dev->chan_state;
	ktime_t cur_time;

	if (!test_bit(MT76_STATE_RUNNING, &dev->state))
		return;

	if (dev->drv->update_survey)
		dev->drv->update_survey(dev);

	cur_time = ktime_get_boottime();
	state->cc_active += ktime_to_us(ktime_sub(cur_time,
						  dev->survey_time));
	dev->survey_time = cur_time;

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

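/*
 * Switch the software channel state: wait (up to 200 ms) for pending TX to
 * drain, credit the time spent on the old channel to its survey counters,
 * then point chan_state at the new channel.  Off-channel visits (e.g.
 * scanning) get their counters reset so they do not accumulate stale data.
 */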
void mt76_set_channel(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
	mt76_update_survey(dev);

	dev->chandef = *chandef;
	dev->chan_state = mt76_channel_state(dev, chandef->chan);

	if (!offchannel)
		dev->main_chan = chandef->chan;

	if (chandef->chan != dev->main_chan)
		memset(dev->chan_state, 0, sizeof(*dev->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(dev);

	sband = &dev->sband_2g;
	if (idx >= sband->sband.n_channels) {
		idx -= sband->sband.n_channels;
		sband = &dev->sband_5g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(dev, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (chan == dev->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);

	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

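/*
 * Remember the current CCMP packet numbers per TID so received frames can
 * be checked for replays in mt76_check_ccmp_pn().  Only CCMP keys are
 * tracked; other ciphers leave rx_check_pn disabled.
 */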
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	return wcid_to_sta(mstat.wcid);
}

static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

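/*
 * Software RX airtime accounting, used by drivers that set
 * MT_DRV_SW_RX_AIRTIME: estimate the duration of received frames, add it
 * to the pending BSS RX time and report it to mac80211's airtime fairness
 * scheduler.  Frames belonging to the same A-MPDU (same ampdu_ref) are
 * accumulated and reported in one go.
 */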
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	u32 airtime;

	airtime = mt76_calc_rx_airtime(dev, status, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, status->tid, 0, airtime);
}

static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}

static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		if (!ether_addr_equal(hdr->addr1, dev->macaddr))
			return;

		wcid = NULL;
	}

	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

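/*
 * Per-frame RX bookkeeping for the sending station: RSSI averaging,
 * airtime accounting and, for drivers that set MT_WCID_FLAG_CHECK_PS,
 * powersave transitions mirrored from the PM bit and U-APSD trigger
 * handling.  When a station wakes up, its queues with pending retries are
 * rescheduled.
 */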
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid = status->wcid;
	bool ps;
	int i;

	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
		sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, status->tid);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);

	if (ps)
		return;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		if (!skb_queue_empty(&mtxq->retry_q))
			ieee80211_schedule_txq(dev->hw, sta->txq[i]);
	}
}

void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct sk_buff *skb;

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		sta = mt76_rx_convert(skb);
		ieee80211_rx_napi(dev->hw, sta, skb, napi);
	}
	spin_unlock(&dev->rx_lock);
}

void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid;

		mt76_txq_init(dev, sta->txq[i]);
	}

	ewma_signal_init(&wcid->rssi);
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}

void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	rcu_assign_pointer(dev->wcid[idx], NULL);
	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_tx_status_check(dev, wcid, true);
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
		mt76_txq_remove(dev, sta->txq[i]);
	mt76_wcid_free(dev->wcid_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_dev *dev = hw->priv;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(dev, vif, sta);

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC &&
	    dev->drv->sta_assoc)
		dev->drv->sta_assoc(dev, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);

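/*
 * txpower_cur appears to be kept in 0.5 dB steps, hence the division by
 * two.  The per-chain value is then increased by roughly
 * 10 * log10(n_chains) dB to approximate the combined output power,
 * e.g. two chains add ~3 dB and four chains ~6 dB.
 */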
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_dev *dev = hw->priv;
	int n_chains = hweight8(dev->antenna_mask);

	*dbm = DIV_ROUND_UP(dev->txpower_cur, 2);

	/* convert from per-chain power to combined
	 * output power
	 */
	switch (n_chains) {
	case 4:
		*dbm += 6;
		break;
	case 3:
		*dbm += 4;
		break;
	case 2:
		*dbm += 3;
		break;
	default:
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

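/*
 * Channel switch announcement helpers.  Drivers typically call
 * mt76_csa_check() from their beacon path to note when the CSA countdown
 * has completed, and mt76_csa_finish() afterwards to let mac80211 finalize
 * the switch on every interface with an active CSA.
 */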
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->csa_active && ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);
}

void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);

static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->csa_active)
		return;

	dev->csa_complete |= ieee80211_csa_is_complete(vif);
}

void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);

int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);

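/*
 * Reinsert the 8-byte CCMP header that the hardware stripped on receive:
 * make room in front of the payload, copy the packet number from
 * status->iv into the PN bytes, and set the Ext IV flag (0x20) together
 * with the key index in byte 3.  Clearing RX_FLAG_IV_STRIPPED afterwards
 * tells mac80211 the IV is present again.
 */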
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);

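/*
 * Map a hardware rate index onto an index into the band's bitrate table.
 * CCK only exists on 2.4 GHz, and its short-preamble bit is masked off;
 * for OFDM lookups on 2.4 GHz the first four entries (normally the CCK
 * rates) are skipped.
 */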
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (sband == &dev->sband_5g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);

void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_dev *dev = hw->priv;

	set_bit(MT76_SCANNING, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);

void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = hw->priv;

	clear_bit(MT76_SCANNING, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_dev *dev = hw->priv;

	mutex_lock(&dev->mutex);
	*tx_ant = dev->antenna_mask;
	*rx_ant = dev->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);