// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/fs.h>
#include <linux/firmware.h>
#include "mt7921.h"
#include "mt7921_trace.h"
#include "mcu.h"
#include "../mt76_connac2_mac.h"

#define MT_STA_BFER			BIT(0)
#define MT_STA_BFEE			BIT(1)

static bool mt7921_disable_clc;
module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
MODULE_PARM_DESC(disable_clc, "disable CLC support");

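/* Validate an MCU response and extract its status/return value. The status
 * field layout differs between the patch, EXT, UNI and CE command groups, so
 * each family is unpacked separately.
 */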
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
			      struct sk_buff *skb, int seq)
{
	int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
	struct mt76_connac2_mcu_rxd *rxd;
	int ret = 0;

	if (!skb) {
		dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
			cmd, seq);
		mt7921_reset(mdev);

		return -ETIMEDOUT;
	}

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	if (seq != rxd->seq)
		return -EAGAIN;

	if (cmd == MCU_CMD(PATCH_SEM_CONTROL) ||
	    cmd == MCU_CMD(PATCH_FINISH_REQ)) {
		skb_pull(skb, sizeof(*rxd) - 4);
		ret = *skb->data;
	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
		skb_pull(skb, sizeof(*rxd) + 4);
		ret = le32_to_cpu(*(__le32 *)skb->data);
	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
		   cmd == MCU_UNI_CMD(HIF_CTRL) ||
		   cmd == MCU_UNI_CMD(OFFLOAD) ||
		   cmd == MCU_UNI_CMD(SUSPEND)) {
		struct mt76_connac_mcu_uni_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt76_connac_mcu_uni_event *)skb->data;
		ret = le32_to_cpu(event->status);
		/* skip invalid event */
		if (mcu_cmd != event->cid)
			ret = -EAGAIN;
	} else if (cmd == MCU_CE_QUERY(REG_READ)) {
		struct mt76_connac_mcu_reg_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt76_connac_mcu_reg_event *)skb->data;
		ret = (int)le32_to_cpu(event->val);
	} else {
		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);

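/* Read one eFuse/EEPROM byte via the MCU: the firmware returns a whole
 * MT7921_EEPROM_BLOCK_SIZE block, from which the requested offset is picked.
 */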
static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val)
{
	struct mt7921_mcu_eeprom_info *res, req = {
		.addr = cpu_to_le32(round_down(offset,
				    MT7921_EEPROM_BLOCK_SIZE)),
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	res = (struct mt7921_mcu_eeprom_info *)skb->data;
	*val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
	dev_kfree_skb(skb);

	return 0;
}

#ifdef CONFIG_PM

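/* Enable the IPv6 neighbour-solicitation offload while suspended and disable
 * it again on resume.
 */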
static int
mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
			      struct ieee80211_vif *vif, bool suspend)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_arpns_tlv arpns;
	} req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.arpns = {
			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
			.mode = suspend,
		},
	};

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, sizeof(req),
				 true);
}

void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (IS_ENABLED(CONFIG_IPV6)) {
		struct mt76_phy *phy = priv;

		mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
					      !test_bit(MT76_STATE_RUNNING,
					      &phy->state));
	}

	mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
}

#endif /* CONFIG_PM */

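/* Handle a remain-on-channel grant from the firmware: wake up any waiter and
 * arm the ROC expiry timer with the granted interval.
 */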
static void
mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_roc_grant_tlv *grant;
	struct mt76_connac2_mcu_rxd *rxd;
	int duration;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);

	/* should never happen */
	WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT));

	if (grant->reqtype == MT7921_ROC_REQ_ROC)
		ieee80211_ready_on_channel(dev->mt76.phy.hw);

	dev->phy.roc_grant = true;
	wake_up(&dev->phy.roc_wait);
	duration = le32_to_cpu(grant->max_interval);
	mod_timer(&dev->phy.roc_timer,
		  jiffies + msecs_to_jiffies(duration));
}

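/* Queue (sched) scan done events for deferred handling in the scan worker. */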
static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;

	spin_lock_bh(&dev->mt76.lock);
	__skb_queue_tail(&phy->scan_event_list, skb);
	spin_unlock_bh(&dev->mt76.lock);

	ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
				     MT7921_HW_SCAN_TIMEOUT);
}

static void
mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_beacon_loss_event *event = priv;

	if (mvif->idx != event->bss_idx)
		return;

	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
	    vif->type != NL80211_IFTYPE_STATION)
		return;

	ieee80211_connection_loss(vif);
}

static void
mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac_beacon_loss_event *event;
	struct mt76_phy *mphy = &dev->mt76.phy;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt76_connac_beacon_loss_event *)skb->data;

	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					mt7921_mcu_connection_loss_iter, event);
}

static void
mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_debug_msg {
		__le16 id;
		u8 type;
		u8 flag;
		__le32 value;
		__le16 len;
		u8 content[512];
	} __packed * msg;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	msg = (struct mt7921_debug_msg *)skb->data;

	if (msg->type == 3) { /* fw log */
		u16 len = min_t(u16, le16_to_cpu(msg->len), 512);
		int i;

		for (i = 0; i < len; i++) {
			if (!msg->content[i])
				msg->content[i] = ' ';
		}
		wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
	}
}

static void
mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_lp_event {
		u8 state;
		u8 reserved[3];
	} __packed * event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_lp_event *)skb->data;

	trace_lp_event(dev, event->state);
}

static void
mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_tx_done_event *event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_tx_done_event *)skb->data;

	mt7921_mac_add_txs(dev, event->txs);
}

static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	switch (rxd->eid) {
	case MCU_EVENT_BSS_BEACON_LOSS:
		mt7921_mcu_connection_loss_event(dev, skb);
		break;
	case MCU_EVENT_SCHED_SCAN_DONE:
	case MCU_EVENT_SCAN_DONE:
		mt7921_mcu_scan_event(dev, skb);
		return;
	case MCU_EVENT_DBG_MSG:
		mt7921_mcu_debug_msg_event(dev, skb);
		break;
	case MCU_EVENT_COREDUMP:
		dev->fw_assert = true;
		mt76_connac_mcu_coredump_event(&dev->mt76, skb,
					       &dev->coredump);
		return;
	case MCU_EVENT_LP_INFO:
		mt7921_mcu_low_power_event(dev, skb);
		break;
	case MCU_EVENT_TX_DONE:
		mt7921_mcu_tx_done_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

static void
mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev,
				    struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	switch (rxd->eid) {
	case MCU_UNI_EVENT_ROC:
		mt7921_mcu_uni_roc_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

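/* Top-level MCU event dispatcher: unsolicited events are consumed here,
 * everything else is handed back to the mt76 core as a command response.
 */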
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	if (skb_linearize(skb))
		return;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
		mt7921_mcu_uni_rx_unsolicited_event(dev, skb);
		return;
	}

	if (rxd->eid == 0x6) {
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	}

	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
	    rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
	    rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_TX_DONE ||
	    rxd->eid == MCU_EVENT_DBG_MSG ||
	    rxd->eid == MCU_EVENT_COREDUMP ||
	    rxd->eid == MCU_EVENT_LP_INFO ||
	    !rxd->seq)
		mt7921_mcu_rx_unsolicited_event(dev, skb);
	else
		mt76_mcu_rx_event(&dev->mt76, skb);
}

/** starec & wtbl **/
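/* Set up or tear down a TX BlockAck session in the station record; A-MSDU in
 * A-MPDU is disabled when the peer did not negotiate it.
 */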
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	if (enable && !params->amsdu)
		msta->wcid.amsdu = false;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, true);
}

int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, false);
}

static char *mt7921_patch_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_ROM_PATCH;
	else
		ret = MT7921_ROM_PATCH;

	return ret;
}

static char *mt7921_ram_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_FIRMWARE_WM;
	else
		ret = MT7921_FIRMWARE_WM;

	return ret;
}

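/* Locate the non-downloadable CLC (country/location config) region in the WM
 * firmware image and cache each CLC blob for later SET_CLC updates. Skipped
 * for USB devices or when the disable_clc module parameter is set.
 */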
static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name)
{
	const struct mt76_connac2_fw_trailer *hdr;
	const struct mt76_connac2_fw_region *region;
	const struct mt7921_clc *clc;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7921_phy *phy = &dev->phy;
	const struct firmware *fw;
	int ret, i, len, offset = 0;
	u8 *clc_base = NULL, hw_encap = 0;

	if (mt7921_disable_clc ||
	    mt76_is_usb(&dev->mt76))
		return 0;

	if (mt76_is_mmio(&dev->mt76)) {
		ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
		if (ret)
			return ret;
		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
	}

	ret = request_firmware(&fw, fw_name, mdev->dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		dev_err(mdev->dev, "Invalid firmware\n");
		ret = -EINVAL;
		goto out;
	}

	hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
	for (i = 0; i < hdr->n_region; i++) {
		region = (const void *)((const u8 *)hdr -
					(hdr->n_region - i) * sizeof(*region));
		len = le32_to_cpu(region->len);

		/* check if we have valid buffer size */
		if (offset + len > fw->size) {
			dev_err(mdev->dev, "Invalid firmware region\n");
			ret = -EINVAL;
			goto out;
		}

		if ((region->feature_set & FW_FEATURE_NON_DL) &&
		    region->type == FW_TYPE_CLC) {
			clc_base = (u8 *)(fw->data + offset);
			break;
		}
		offset += len;
	}

	if (!clc_base)
		goto out;

	for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
		clc = (const struct mt7921_clc *)(clc_base + offset);

		/* do not init buf again if chip reset triggered */
		if (phy->clc[clc->idx])
			continue;

		/* header content sanity */
		if (clc->idx == MT7921_CLC_POWER &&
		    u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
			continue;

		phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
						  le32_to_cpu(clc->len),
						  GFP_KERNEL);

		if (!phy->clc[clc->idx]) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
	release_firmware(fw);

	return ret;
}

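/* Download the ROM patch and WM RAM firmware, then wait for the N9 core to
 * report ready.
 */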
static int mt7921_load_firmware(struct mt7921_dev *dev)
{
	int ret;

	ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev));
	if (ret)
		return ret;

	if (mt76_is_sdio(&dev->mt76)) {
		/* activate again */
		ret = __mt7921_mcu_fw_pmctrl(dev);
		if (!ret)
			ret = __mt7921_mcu_drv_pmctrl(dev);
	}

	ret = mt76_connac2_load_ram(&dev->mt76, mt7921_ram_name(dev), NULL);
	if (ret)
		return ret;

	if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
			    MT_TOP_MISC2_FW_N9_RDY, 1500)) {
		dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");

		return -EIO;
	}

#ifdef CONFIG_PM
	dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
#endif /* CONFIG_PM */

	dev_dbg(dev->mt76.dev, "Firmware init done\n");

	return 0;
}

int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
{
	struct {
		u8 ctrl_val;
		u8 pad[3];
	} data = {
		.ctrl_val = ctrl
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
				 &data, sizeof(data), false);
}

int mt7921_run_firmware(struct mt7921_dev *dev)
{
	int err;

	err = mt7921_load_firmware(dev);
	if (err)
		return err;

	err = mt76_connac_mcu_get_nic_capability(&dev->mphy);
	if (err)
		return err;

	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
	err = mt7921_load_clc(dev, mt7921_ram_name(dev));
	if (err)
		return err;

	return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);

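/* Program the per-AC EDCA parameters for a vif, plus the MU EDCA parameters
 * when the BSS supports HE.
 */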
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct edca {
		__le16 cw_min;
		__le16 cw_max;
		__le16 txop;
		__le16 aifs;
		u8 guardtime;
		u8 acm;
	} __packed;
	struct mt7921_mcu_tx {
		struct edca edca[IEEE80211_NUM_ACS];
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad;
	} __packed req = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	struct mu_edca {
		u8 cw_min;
		u8 cw_max;
		u8 aifsn;
		u8 acm;
		u8 timer;
		u8 padding[3];
	};
	struct mt7921_mcu_mu_tx {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad1;
		struct mu_edca edca[IEEE80211_NUM_ACS];
		u8 pad3[32];
	} __packed req_mu = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	static const int to_aci[] = { 1, 0, 2, 3 };
	int ac, ret;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
		struct edca *e = &req.edca[to_aci[ac]];

		e->aifs = cpu_to_le16(q->aifs);
		e->txop = cpu_to_le16(q->txop);

		if (q->cw_min)
			e->cw_min = cpu_to_le16(q->cw_min);
		else
			e->cw_min = cpu_to_le16(5);

		if (q->cw_max)
			e->cw_max = cpu_to_le16(q->cw_max);
		else
			e->cw_max = cpu_to_le16(10);
	}

	ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
				sizeof(req), false);
	if (ret)
		return ret;

	if (!vif->bss_conf.he_support)
		return 0;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_he_mu_edca_param_ac_rec *q;
		struct mu_edca *e;

		if (!mvif->queue_params[ac].mu_edca)
			break;

		q = &mvif->queue_params[ac].mu_edca_param_rec;
		e = &(req_mu.edca[to_aci[ac]]);

		e->cw_min = q->ecw_min_max & 0xf;
		e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
		e->aifsn = q->aifsn;
		e->timer = q->mu_edca_timer;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
				 &req_mu, sizeof(req_mu), false);
}

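/* Ask the firmware to schedule a remain-on-channel period; completion is
 * signalled asynchronously via the UNI ROC grant event.
 */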
int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
		       struct ieee80211_channel *chan, int duration,
		       enum mt7921_roc_req type, u8 token_id)
{
	int center_ch = ieee80211_frequency_to_channel(chan->center_freq);
	struct mt7921_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_acquire_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 control_channel;
			u8 sco;
			u8 band;
			u8 bw;
			u8 center_chan;
			u8 center_chan2;
			u8 bw_from_ap;
			u8 center_chan_from_ap;
			u8 center_chan2_from_ap;
			u8 reqtype;
			__le32 maxinterval;
			u8 dbdcband;
			u8 rsv[3];
		} __packed roc;
	} __packed req = {
		.roc = {
			.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
			.len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
			.tokenid = token_id,
			.reqtype = type,
			.maxinterval = cpu_to_le32(duration),
			.bss_idx = vif->mt76.idx,
			.control_channel = chan->hw_value,
			.bw = CMD_CBW_20MHZ,
			.bw_from_ap = CMD_CBW_20MHZ,
			.center_chan = center_ch,
			.center_chan_from_ap = center_ch,
			.dbdcband = 0xff, /* auto */
		},
	};

	if (chan->hw_value < center_ch)
		req.roc.sco = 1; /* SCA */
	else if (chan->hw_value > center_ch)
		req.roc.sco = 3; /* SCB */

	switch (chan->band) {
	case NL80211_BAND_6GHZ:
		req.roc.band = 3;
		break;
	case NL80211_BAND_5GHZ:
		req.roc.band = 2;
		break;
	default:
		req.roc.band = 1;
		break;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
			 u8 token_id)
{
	struct mt7921_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_abort_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 dbdcband;
			u8 rsv[5];
		} __packed abort;
	} __packed req = {
		.abort = {
			.tag = cpu_to_le16(UNI_ROC_ABORT),
			.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
			.tokenid = token_id,
			.bss_idx = vif->mt76.idx,
			.dbdcband = 0xff, /* auto */
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

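/* Push the current channel configuration (primary/center channel, bandwidth,
 * stream counts and switch reason) to the firmware.
 */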
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
{
	struct mt7921_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq1 = chandef->center_freq1;
	struct {
		u8 control_ch;
		u8 center_ch;
		u8 bw;
		u8 tx_streams_num;
		u8 rx_streams;	/* mask or num */
		u8 switch_reason;
		u8 band_idx;
		u8 center_ch2;	/* for 80+80 only */
		__le16 cac_case;
		u8 channel_band;
		u8 rsv0;
		__le32 outband_freq;
		u8 txpower_drop;
		u8 ap_bw;
		u8 ap_center_ch;
		u8 rsv1[57];
	} __packed req = {
		.control_ch = chandef->chan->hw_value,
		.center_ch = ieee80211_frequency_to_channel(freq1),
		.bw = mt76_connac_chan_bw(chandef),
		.tx_streams_num = hweight8(phy->mt76->antenna_mask),
		.rx_streams = phy->mt76->antenna_mask,
		.band_idx = phy != &dev->phy,
	};

	if (chandef->chan->band == NL80211_BAND_6GHZ)
		req.channel_band = 2;
	else
		req.channel_band = chandef->chan->band;

	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
		req.switch_reason = CH_SWITCH_NORMAL;
	else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
					  NL80211_IFTYPE_AP))
		req.switch_reason = CH_SWITCH_DFS;
	else
		req.switch_reason = CH_SWITCH_NORMAL;

	if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
		req.rx_streams = hweight8(req.rx_streams);

	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;

		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
	}

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}

int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
{
	struct req_hdr {
		u8 buffer_mode;
		u8 format;
		__le16 len;
	} __packed req = {
		.buffer_mode = EE_MODE_EFUSE,
		.format = EE_FORMAT_WHOLE,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
				 &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);

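/* Update the BSS power-save state (awake vs. dynamic PS) for a station vif. */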
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct ps_tlv {
			__le16 tag;
			__le16 len;
			u8 ps_state; /* 0: device awake
				      * 1: static power save
				      * 2: dynamic power saving
				      * 3: enter TWT power saving
				      * 4: leave TWT power saving
				      */
			u8 pad[3];
		} __packed ps;
	} __packed ps_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.ps = {
			.tag = cpu_to_le16(UNI_BSS_INFO_PS),
			.len = cpu_to_le16(sizeof(struct ps_tlv)),
			.ps_state = vif->cfg.ps ? 2 : 0,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &ps_req, sizeof(ps_req), true);
}

static int
mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			 bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcnft_tlv {
			__le16 tag;
			__le16 len;
			__le16 bcn_interval;
			u8 dtim_period;
			u8 pad;
		} __packed bcnft;
	} __packed bcnft_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.bcnft = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
			.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
			.dtim_period = vif->bss_conf.dtim_period,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return 0;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &bcnft_req, sizeof(bcnft_req), true);
}

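/* Re-announce BSS PM parameters: abort the current state first and, when
 * enabling, send the connected-BSS parameters again.
 */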
int
mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
		      bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		u8 bss_idx;
		u8 dtim_period;
		__le16 aid;
		__le16 bcn_interval;
		__le16 atim_window;
		u8 uapsd;
		u8 bmc_delivered_ac;
		u8 bmc_triggered_ac;
		u8 pad;
	} req = {
		.bss_idx = mvif->mt76.idx,
		.aid = cpu_to_le16(vif->cfg.aid),
		.dtim_period = vif->bss_conf.dtim_period,
		.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
	};
	struct {
		u8 bss_idx;
		u8 pad[3];
	} req_hdr = {
		.bss_idx = mvif->mt76.idx,
	};
	int err;

	err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
				&req_hdr, sizeof(req_hdr), false);
	if (err < 0 || !enable)
		return err;

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
				 &req, sizeof(req), false);
}

int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
			  struct ieee80211_vif *vif, bool enable,
			  enum mt76_sta_info_state state)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	int rssi = -ewma_rssi_read(&mvif->rssi);
	struct mt76_sta_cmd_info info = {
		.sta = sta,
		.vif = vif,
		.enable = enable,
		.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
		.state = state,
		.offload_fw = true,
		.rcpi = to_rcpi(rssi),
	};
	struct mt7921_sta *msta;

	msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
	info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
	info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;

	return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
}

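/* Take driver ownership of the chip before register access; the chip is reset
 * if the wake handshake with the firmware fails.
 */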
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (!test_bit(MT76_STATE_PM, &mphy->state))
		goto out;

	err = __mt7921_mcu_drv_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_drv_pmctrl);

int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (mt76_connac_skip_fw_pmctrl(mphy, pm))
		goto out;

	err = __mt7921_mcu_fw_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_fw_pmctrl);

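/* Enable or disable firmware beacon filtering and adjust the RX filter so
 * that beacons from other BSSs are dropped while filtering is active.
 */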
int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
				 struct ieee80211_vif *vif,
				 bool enable)
{
#define MT7921_FIF_BIT_CLR		BIT(1)
#define MT7921_FIF_BIT_SET		BIT(0)
	int err;

	if (enable) {
		err = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
		if (err)
			return err;

		err = mt7921_mcu_set_rxfilter(dev, 0,
					      MT7921_FIF_BIT_SET,
					      MT_WF_RFCR_DROP_OTHER_BEACON);
		if (err)
			return err;

		return 0;
	}

	err = mt7921_mcu_set_bss_pm(dev, vif, false);
	if (err)
		return err;

	err = mt7921_mcu_set_rxfilter(dev, 0,
				      MT7921_FIF_BIT_CLR,
				      MT_WF_RFCR_DROP_OTHER_BEACON);
	if (err)
		return err;

	return 0;
}

int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
{
	struct mt7921_txpwr_event *event;
	struct mt7921_txpwr_req req = {
		.dbdc_idx = 0,
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	event = (struct mt7921_txpwr_event *)skb->data;
	WARN_ON(skb->len != le16_to_cpu(event->len));
	memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));

	dev_kfree_skb(skb);

	return 0;
}

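/* Toggle the firmware sniffer (monitor) mode on the band this vif uses. */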
int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			   bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct sniffer_enable_tlv {
			__le16 tag;
			__le16 len;
			u8 enable;
			u8 pad[3];
		} __packed enable;
	} req = {
		.hdr = {
			.band_idx = mvif->band_idx,
		},
		.enable = {
			.tag = cpu_to_le16(0),
			.len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
			.enable = enable,
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
				 true);
}

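/* Push the sniffer channel configuration (band, bandwidth, control and center
 * channels) for the monitor vif.
 */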
int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
			      struct ieee80211_chanctx_conf *ctx)
{
	struct cfg80211_chan_def *chandef = &ctx->def;
	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
	const u8 ch_band[] = {
		[NL80211_BAND_2GHZ] = 1,
		[NL80211_BAND_5GHZ] = 2,
		[NL80211_BAND_6GHZ] = 3,
	};
	const u8 ch_width[] = {
		[NL80211_CHAN_WIDTH_20_NOHT] = 0,
		[NL80211_CHAN_WIDTH_20] = 0,
		[NL80211_CHAN_WIDTH_40] = 0,
		[NL80211_CHAN_WIDTH_80] = 1,
		[NL80211_CHAN_WIDTH_160] = 2,
		[NL80211_CHAN_WIDTH_80P80] = 3,
		[NL80211_CHAN_WIDTH_5] = 4,
		[NL80211_CHAN_WIDTH_10] = 5,
		[NL80211_CHAN_WIDTH_320] = 6,
	};
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct config_tlv {
			__le16 tag;
			__le16 len;
			u16 aid;
			u8 ch_band;
			u8 bw;
			u8 control_ch;
			u8 sco;
			u8 center_ch;
			u8 center_ch2;
			u8 drop_err;
			u8 pad[3];
		} __packed tlv;
	} __packed req = {
		.hdr = {
			.band_idx = vif->mt76.band_idx,
		},
		.tlv = {
			.tag = cpu_to_le16(1),
			.len = cpu_to_le16(sizeof(req.tlv)),
			.control_ch = chandef->chan->hw_value,
			.center_ch = ieee80211_frequency_to_channel(freq1),
			.drop_err = 1,
		},
	};

	if (chandef->chan->band < ARRAY_SIZE(ch_band))
		req.tlv.ch_band = ch_band[chandef->chan->band];
	if (chandef->width < ARRAY_SIZE(ch_width))
		req.tlv.bw = ch_width[chandef->width];

	if (freq2)
		req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);

	if (req.tlv.control_ch < req.tlv.center_ch)
		req.tlv.sco = 1; /* SCA */
	else if (req.tlv.control_ch > req.tlv.center_ch)
		req.tlv.sco = 3; /* SCB */

	return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
				 &req, sizeof(req), true);
}

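/* Hand a beacon template (TXD + frame) to the firmware for beacon offload;
 * only the enable/update path is supported here.
 */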
int
mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
				  struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
	struct ieee80211_mutable_offsets offs;
	struct {
		struct req_hdr {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcn_content_tlv {
			__le16 tag;
			__le16 len;
			__le16 tim_ie_pos;
			__le16 csa_ie_pos;
			__le16 bcc_ie_pos;
			/* 0: disable beacon offload
			 * 1: enable beacon offload
			 * 2: update probe response offload
			 */
			u8 enable;
			/* 0: legacy format (TXD + payload)
			 * 1: only cap field IE
			 */
			u8 type;
			__le16 pkt_len;
			u8 pkt[512];
		} __packed beacon_tlv;
	} req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.beacon_tlv = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
			.len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
			.enable = enable,
		},
	};
	struct sk_buff *skb;

	/* support enable/update process only
	 * disable flow would be handled in bss stop handler automatically
	 */
	if (!enable)
		return -EOPNOTSUPP;

	skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
	if (!skb)
		return -EINVAL;

	if (skb->len > 512 - MT_TXD_SIZE) {
		dev_err(dev->mt76.dev, "beacon size limit exceed\n");
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	mt76_connac2_mac_write_txwi(&dev->mt76, (__le32 *)(req.beacon_tlv.pkt),
				    skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON);
	memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
	req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
	req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);

	if (offs.cntdwn_counter_offs[0]) {
		u16 csa_offs;

		csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
		req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
	}
	dev_kfree_skb(skb);

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &req, sizeof(req), true);
}

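/* Send the CLC rules matching the given alpha2 country code to the firmware,
 * one SET_CLC message per matching rule.
 */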
static
int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
			 enum environment_cap env_cap,
			 struct mt7921_clc *clc,
			 u8 idx)
{
	struct sk_buff *skb;
	struct {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 idx;
		u8 env;
		u8 acpi_conf;
		u8 pad1;
		u8 alpha2[2];
		u8 type[2];
		u8 rsvd[64];
	} __packed req = {
		.idx = idx,
		.env = env_cap,
		.acpi_conf = mt7921_acpi_get_flags(&dev->phy),
	};
	int ret, valid_cnt = 0;
	u8 i, *pos;

	if (!clc)
		return 0;

	pos = clc->data;
	for (i = 0; i < clc->nr_country; i++) {
		struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
		u16 len = le16_to_cpu(rule->len);

		pos += len + sizeof(*rule);
		if (rule->alpha2[0] != alpha2[0] ||
		    rule->alpha2[1] != alpha2[1])
			continue;

		memcpy(req.alpha2, rule->alpha2, 2);
		memcpy(req.type, rule->type, 2);

		req.len = cpu_to_le16(sizeof(req) + len);
		skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
					   le16_to_cpu(req.len),
					   sizeof(req), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		skb_put_data(skb, rule->data, len);

		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
					    MCU_CE_CMD(SET_CLC), false);
		if (ret < 0)
			return ret;
		valid_cnt++;
	}

	if (!valid_cnt)
		return -ENOENT;

	return 0;
}

int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
		       enum environment_cap env_cap)
{
	struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy;
	int i, ret;

	/* submit all clc config */
	for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
		ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
					   phy->clc[i], i);

		/* If no country found, set "00" as default */
		if (ret == -ENOENT)
			ret = __mt7921_mcu_set_clc(dev, "00",
						   ENVIRON_INDOOR,
						   phy->clc[i], i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

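/* Configure the firmware RX filter. A non-zero fif appears to program the
 * whole filter word (mode 1), otherwise bit_op/bit_map update individual
 * RFCR bits (mode 2).
 */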
int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif,
			    u8 bit_op, u32 bit_map)
{
	struct {
		u8 rsv[4];
		u8 mode;
		u8 rsv2[3];
		__le32 fif;
		__le32 bit_map; /* bit_* for bitmap update */
		u8 bit_op;
		u8 pad[51];
	} __packed data = {
		.mode = fif ? 1 : 2,
		.fif = cpu_to_le32(fif),
		.bit_map = cpu_to_le32(bit_map),
		.bit_op = bit_op,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
				 &data, sizeof(data), false);
}