// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/fs.h>
#include <linux/firmware.h>
#include "mt7921.h"
#include "mt7921_trace.h"
#include "mcu.h"
#include "../mt76_connac2_mac.h"

#define MT_STA_BFER			BIT(0)
#define MT_STA_BFEE			BIT(1)

static bool mt7921_disable_clc;
module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
MODULE_PARM_DESC(disable_clc, "disable CLC support");

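/* Parse an MCU response and extract the command-specific return value.
 * A missing skb means the firmware did not answer before the timeout, in
 * which case a chip reset is triggered; a sequence-number mismatch is
 * reported as -EAGAIN so the caller can keep waiting for its reply.
 */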
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
			      struct sk_buff *skb, int seq)
{
	int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
	struct mt76_connac2_mcu_rxd *rxd;
	int ret = 0;

	if (!skb) {
		dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
			cmd, seq);
		mt7921_reset(mdev);

		return -ETIMEDOUT;
	}

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	if (seq != rxd->seq)
		return -EAGAIN;

	if (cmd == MCU_CMD(PATCH_SEM_CONTROL) ||
	    cmd == MCU_CMD(PATCH_FINISH_REQ)) {
		skb_pull(skb, sizeof(*rxd) - 4);
		ret = *skb->data;
	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
		skb_pull(skb, sizeof(*rxd) + 4);
		ret = le32_to_cpu(*(__le32 *)skb->data);
	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
		   cmd == MCU_UNI_CMD(HIF_CTRL) ||
		   cmd == MCU_UNI_CMD(OFFLOAD) ||
		   cmd == MCU_UNI_CMD(SUSPEND)) {
		struct mt76_connac_mcu_uni_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt76_connac_mcu_uni_event *)skb->data;
		ret = le32_to_cpu(event->status);
		/* skip invalid event */
		if (mcu_cmd != event->cid)
			ret = -EAGAIN;
	} else if (cmd == MCU_CE_QUERY(REG_READ)) {
		struct mt76_connac_mcu_reg_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt76_connac_mcu_reg_event *)skb->data;
		ret = (int)le32_to_cpu(event->val);
	} else {
		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);

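/* Read one byte from the efuse/EEPROM through the MCU. The firmware
 * returns a whole MT7921_EEPROM_BLOCK_SIZE block, so the offset is
 * rounded down for the query and the requested byte is picked out of
 * the returned block.
 */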
static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val)
{
	struct mt7921_mcu_eeprom_info *res, req = {
		.addr = cpu_to_le32(round_down(offset,
				    MT7921_EEPROM_BLOCK_SIZE)),
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	res = (struct mt7921_mcu_eeprom_info *)skb->data;
	*val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
	dev_kfree_skb(skb);

	return 0;
}

#ifdef CONFIG_PM

static int
mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
			      struct ieee80211_vif *vif, bool suspend)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_arpns_tlv arpns;
	} req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.arpns = {
			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
			.mode = suspend,
		},
	};

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, sizeof(req),
				 true);
}

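/* Interface iterator used around suspend/resume: program the IPv6 NS
 * offload filter (when IPv6 is enabled) and then run the common connac
 * suspend iterator for the remaining per-vif offloads.
 */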
void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (IS_ENABLED(CONFIG_IPV6)) {
		struct mt76_phy *phy = priv;

		mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
					      !test_bit(MT76_STATE_RUNNING,
					      &phy->state));
	}

	mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
}

#endif /* CONFIG_PM */

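/* Handle the unsolicited ROC grant event: tell mac80211 that the
 * requested remain-on-channel period has started, wake up any waiter and
 * arm the ROC timer with the granted duration.
 */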
static void
mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_roc_grant_tlv *grant;
	struct mt76_connac2_mcu_rxd *rxd;
	int duration;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);

	/* should never happen */
	WARN_ON_ONCE(le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT);

	if (grant->reqtype == MT7921_ROC_REQ_ROC)
		ieee80211_ready_on_channel(dev->mt76.phy.hw);

	dev->phy.roc_grant = true;
	wake_up(&dev->phy.roc_wait);
	duration = le32_to_cpu(grant->max_interval);
	mod_timer(&dev->phy.roc_timer,
		  jiffies + msecs_to_jiffies(duration));
}

static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;

	spin_lock_bh(&dev->mt76.lock);
	__skb_queue_tail(&phy->scan_event_list, skb);
	spin_unlock_bh(&dev->mt76.lock);

	ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
				     MT7921_HW_SCAN_TIMEOUT);
}

static void
mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_beacon_loss_event *event = priv;

	if (mvif->idx != event->bss_idx)
		return;

	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
	    vif->type != NL80211_IFTYPE_STATION)
		return;

	ieee80211_connection_loss(vif);
}

static void
mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac_beacon_loss_event *event;
	struct mt76_phy *mphy = &dev->mt76.phy;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt76_connac_beacon_loss_event *)skb->data;

	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					mt7921_mcu_connection_loss_iter, event);
}

static void
mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_debug_msg {
		__le16 id;
		u8 type;
		u8 flag;
		__le32 value;
		__le16 len;
		u8 content[512];
	} __packed * msg;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	msg = (struct mt7921_debug_msg *)skb->data;

	if (msg->type == 3) { /* fw log */
		u16 len = min_t(u16, le16_to_cpu(msg->len), 512);
		int i;

		for (i = 0; i < len; i++) {
			if (!msg->content[i])
				msg->content[i] = ' ';
		}
		wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
	}
}

static void
mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_lp_event {
		u8 state;
		u8 reserved[3];
	} __packed * event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_lp_event *)skb->data;

	trace_lp_event(dev, event->state);
}

static void
mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_tx_done_event *event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_tx_done_event *)skb->data;

	mt7921_mac_add_txs(dev, event->txs);
}

static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	switch (rxd->eid) {
	case MCU_EVENT_BSS_BEACON_LOSS:
		mt7921_mcu_connection_loss_event(dev, skb);
		break;
	case MCU_EVENT_SCHED_SCAN_DONE:
	case MCU_EVENT_SCAN_DONE:
		mt7921_mcu_scan_event(dev, skb);
		return;
	case MCU_EVENT_DBG_MSG:
		mt7921_mcu_debug_msg_event(dev, skb);
		break;
	case MCU_EVENT_COREDUMP:
		dev->fw_assert = true;
		mt76_connac_mcu_coredump_event(&dev->mt76, skb,
					       &dev->coredump);
		return;
	case MCU_EVENT_LP_INFO:
		mt7921_mcu_low_power_event(dev, skb);
		break;
	case MCU_EVENT_TX_DONE:
		mt7921_mcu_tx_done_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

static void
mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev,
				    struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	switch (rxd->eid) {
	case MCU_UNI_EVENT_ROC:
		mt7921_mcu_uni_roc_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

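/* Dispatch an MCU RX message: unsolicited events (identified by the
 * unicmd option flag, by a well-known event id or by a zero sequence
 * number) are handled here, everything else is treated as a command
 * response and completed through mt76_mcu_rx_event().
 */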
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	if (skb_linearize(skb))
		return;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
		mt7921_mcu_uni_rx_unsolicited_event(dev, skb);
		return;
	}

	if (rxd->eid == 0x6) {
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	}

	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
	    rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
	    rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_TX_DONE ||
	    rxd->eid == MCU_EVENT_DBG_MSG ||
	    rxd->eid == MCU_EVENT_COREDUMP ||
	    rxd->eid == MCU_EVENT_LP_INFO ||
	    !rxd->seq)
		mt7921_mcu_rx_unsolicited_event(dev, skb);
	else
		mt76_mcu_rx_event(&dev->mt76, skb);
}

/** starec & wtbl **/
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	if (enable && !params->amsdu)
		msta->wcid.amsdu = false;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, true);
}

int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, false);
}

static char *mt7921_patch_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_ROM_PATCH;
	else
		ret = MT7921_ROM_PATCH;

	return ret;
}

static char *mt7921_ram_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_FIRMWARE_WM;
	else
		ret = MT7921_FIRMWARE_WM;

	return ret;
}

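/* Locate the non-downloadable CLC (country list configuration) region in
 * the WM firmware image and cache each CLC blob in phy->clc[]. The load
 * is skipped for USB devices and when the disable_clc module parameter is
 * set; the default "00" world configuration is pushed at the end.
 */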
static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name)
{
	const struct mt76_connac2_fw_trailer *hdr;
	const struct mt76_connac2_fw_region *region;
	const struct mt7921_clc *clc;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7921_phy *phy = &dev->phy;
	const struct firmware *fw;
	int ret, i, len, offset = 0;
	u8 *clc_base = NULL, hw_encap = 0;

	if (mt7921_disable_clc ||
	    mt76_is_usb(&dev->mt76))
		return 0;

	if (mt76_is_mmio(&dev->mt76)) {
		ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
		if (ret)
			return ret;
		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
	}

	ret = request_firmware(&fw, fw_name, mdev->dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		dev_err(mdev->dev, "Invalid firmware\n");
		ret = -EINVAL;
		goto out;
	}

	hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
	for (i = 0; i < hdr->n_region; i++) {
		region = (const void *)((const u8 *)hdr -
					(hdr->n_region - i) * sizeof(*region));
		len = le32_to_cpu(region->len);

		/* check if we have valid buffer size */
		if (offset + len > fw->size) {
			dev_err(mdev->dev, "Invalid firmware region\n");
			ret = -EINVAL;
			goto out;
		}

		if ((region->feature_set & FW_FEATURE_NON_DL) &&
		    region->type == FW_TYPE_CLC) {
			clc_base = (u8 *)(fw->data + offset);
			break;
		}
		offset += len;
	}

	if (!clc_base)
		goto out;

	for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
		clc = (const struct mt7921_clc *)(clc_base + offset);

		/* do not init buf again if chip reset triggered */
		if (phy->clc[clc->idx])
			continue;

		/* header content sanity */
		if (clc->idx == MT7921_CLC_POWER &&
		    u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
			continue;

		phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
						  le32_to_cpu(clc->len),
						  GFP_KERNEL);

		if (!phy->clc[clc->idx]) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
	release_firmware(fw);

	return ret;
}

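/* Download the ROM patch and WM RAM firmware unless the MCU already
 * reports the N9 core as ready (warm boot on MMIO devices), then wait for
 * the firmware-ready bit before declaring the MCU usable.
 */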
static int mt7921_load_firmware(struct mt7921_dev *dev)
{
	int ret;

	ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
	if (ret && mt76_is_mmio(&dev->mt76)) {
		dev_dbg(dev->mt76.dev, "Firmware is already downloaded\n");
		goto fw_loaded;
	}

	ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev));
	if (ret)
		return ret;

	if (mt76_is_sdio(&dev->mt76)) {
		/* activate again */
		ret = __mt7921_mcu_fw_pmctrl(dev);
		if (!ret)
			ret = __mt7921_mcu_drv_pmctrl(dev);
	}

	ret = mt76_connac2_load_ram(&dev->mt76, mt7921_ram_name(dev), NULL);
	if (ret)
		return ret;

	if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
			    MT_TOP_MISC2_FW_N9_RDY, 1500)) {
		dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");

		return -EIO;
	}

fw_loaded:

#ifdef CONFIG_PM
	dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
#endif /* CONFIG_PM */

	dev_dbg(dev->mt76.dev, "Firmware init done\n");

	return 0;
}

int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
{
	struct {
		u8 ctrl_val;
		u8 pad[3];
	} data = {
		.ctrl_val = ctrl
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
				 &data, sizeof(data), false);
}

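/* Bring the MCU to an operational state: load the firmware, query the NIC
 * capabilities, load the cached CLC data and enable firmware-to-host
 * logging.
 */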
int mt7921_run_firmware(struct mt7921_dev *dev)
{
	int err;

	err = mt7921_load_firmware(dev);
	if (err)
		return err;

	err = mt76_connac_mcu_get_nic_capability(&dev->mphy);
	if (err)
		return err;

	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
	err = mt7921_load_clc(dev, mt7921_ram_name(dev));
	if (err)
		return err;

	return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);

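/* Program the per-AC EDCA parameters (and, for HE interfaces, the MU EDCA
 * parameters) for a vif. The mac80211 AC order is remapped to the
 * firmware ACI order through to_aci[], and missing cw_min/cw_max values
 * fall back to the driver defaults of 5 and 10.
 */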
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct edca {
		__le16 cw_min;
		__le16 cw_max;
		__le16 txop;
		__le16 aifs;
		u8 guardtime;
		u8 acm;
	} __packed;
	struct mt7921_mcu_tx {
		struct edca edca[IEEE80211_NUM_ACS];
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad;
	} __packed req = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	struct mu_edca {
		u8 cw_min;
		u8 cw_max;
		u8 aifsn;
		u8 acm;
		u8 timer;
		u8 padding[3];
	};
	struct mt7921_mcu_mu_tx {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad1;
		struct mu_edca edca[IEEE80211_NUM_ACS];
		u8 pad3[32];
	} __packed req_mu = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	static const int to_aci[] = { 1, 0, 2, 3 };
	int ac, ret;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
		struct edca *e = &req.edca[to_aci[ac]];

		e->aifs = cpu_to_le16(q->aifs);
		e->txop = cpu_to_le16(q->txop);

		if (q->cw_min)
			e->cw_min = cpu_to_le16(q->cw_min);
		else
			e->cw_min = cpu_to_le16(5);

		if (q->cw_max)
			e->cw_max = cpu_to_le16(q->cw_max);
		else
			e->cw_max = cpu_to_le16(10);
	}

	ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
				sizeof(req), false);
	if (ret)
		return ret;

	if (!vif->bss_conf.he_support)
		return 0;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_he_mu_edca_param_ac_rec *q;
		struct mu_edca *e;

		if (!mvif->queue_params[ac].mu_edca)
			break;

		q = &mvif->queue_params[ac].mu_edca_param_rec;
		e = &(req_mu.edca[to_aci[ac]]);

		e->cw_min = q->ecw_min_max & 0xf;
		e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
		e->aifsn = q->aifsn;
		e->timer = q->mu_edca_timer;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
				 &req_mu, sizeof(req_mu), false);
}

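/* Ask the firmware to acquire a remain-on-channel slot through the UNI
 * ROC command (the helper below releases it again). Only 20 MHz bandwidth
 * is requested and the DBDC band selection is left to the firmware.
 */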
int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
		       struct ieee80211_channel *chan, int duration,
		       enum mt7921_roc_req type, u8 token_id)
{
	int center_ch = ieee80211_frequency_to_channel(chan->center_freq);
	struct mt7921_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_acquire_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 control_channel;
			u8 sco;
			u8 band;
			u8 bw;
			u8 center_chan;
			u8 center_chan2;
			u8 bw_from_ap;
			u8 center_chan_from_ap;
			u8 center_chan2_from_ap;
			u8 reqtype;
			__le32 maxinterval;
			u8 dbdcband;
			u8 rsv[3];
		} __packed roc;
	} __packed req = {
		.roc = {
			.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
			.len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
			.tokenid = token_id,
			.reqtype = type,
			.maxinterval = cpu_to_le32(duration),
			.bss_idx = vif->mt76.idx,
			.control_channel = chan->hw_value,
			.bw = CMD_CBW_20MHZ,
			.bw_from_ap = CMD_CBW_20MHZ,
			.center_chan = center_ch,
			.center_chan_from_ap = center_ch,
			.dbdcband = 0xff, /* auto */
		},
	};

	if (chan->hw_value < center_ch)
		req.roc.sco = 1; /* SCA */
	else if (chan->hw_value > center_ch)
		req.roc.sco = 3; /* SCB */

	switch (chan->band) {
	case NL80211_BAND_6GHZ:
		req.roc.band = 3;
		break;
	case NL80211_BAND_5GHZ:
		req.roc.band = 2;
		break;
	default:
		req.roc.band = 1;
		break;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
			 u8 token_id)
{
	struct mt7921_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_abort_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 dbdcband;
			u8 rsv[5];
		} __packed abort;
	} __packed req = {
		.abort = {
			.tag = cpu_to_le16(UNI_ROC_ABORT),
			.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
			.tokenid = token_id,
			.bss_idx = vif->mt76.idx,
			.dbdcband = 0xff, /* auto */
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

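/* Push the current operating channel to the firmware. The switch reason
 * selects between normal switching, DPD-bypass for off-channel/scan and
 * DFS handling; CHANNEL_SWITCH expects the number of RX streams instead
 * of the antenna mask.
 */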
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
{
	struct mt7921_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq1 = chandef->center_freq1;
	struct {
		u8 control_ch;
		u8 center_ch;
		u8 bw;
		u8 tx_streams_num;
		u8 rx_streams;	/* mask or num */
		u8 switch_reason;
		u8 band_idx;
		u8 center_ch2;	/* for 80+80 only */
		__le16 cac_case;
		u8 channel_band;
		u8 rsv0;
		__le32 outband_freq;
		u8 txpower_drop;
		u8 ap_bw;
		u8 ap_center_ch;
		u8 rsv1[57];
	} __packed req = {
		.control_ch = chandef->chan->hw_value,
		.center_ch = ieee80211_frequency_to_channel(freq1),
		.bw = mt76_connac_chan_bw(chandef),
		.tx_streams_num = hweight8(phy->mt76->antenna_mask),
		.rx_streams = phy->mt76->antenna_mask,
		.band_idx = phy != &dev->phy,
	};

	if (chandef->chan->band == NL80211_BAND_6GHZ)
		req.channel_band = 2;
	else
		req.channel_band = chandef->chan->band;

	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
		req.switch_reason = CH_SWITCH_NORMAL;
	else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
					  NL80211_IFTYPE_AP))
		req.switch_reason = CH_SWITCH_DFS;
	else
		req.switch_reason = CH_SWITCH_NORMAL;

	if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
		req.rx_streams = hweight8(req.rx_streams);

	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;

		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
	}

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}

int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
{
	struct req_hdr {
		u8 buffer_mode;
		u8 format;
		__le16 len;
	} __packed req = {
		.buffer_mode = EE_MODE_EFUSE,
		.format = EE_FORMAT_WHOLE,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
				 &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);

int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct ps_tlv {
			__le16 tag;
			__le16 len;
			u8 ps_state; /* 0: device awake
				      * 1: static power save
				      * 2: dynamic power saving
				      * 3: enter TWT power saving
				      * 4: leave TWT power saving
				      */
			u8 pad[3];
		} __packed ps;
	} __packed ps_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.ps = {
			.tag = cpu_to_le16(UNI_BSS_INFO_PS),
			.len = cpu_to_le16(sizeof(struct ps_tlv)),
			.ps_state = vif->cfg.ps ? 2 : 0,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &ps_req, sizeof(ps_req), true);
}

static int
mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			 bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcnft_tlv {
			__le16 tag;
			__le16 len;
			__le16 bcn_interval;
			u8 dtim_period;
			u8 pad;
		} __packed bcnft;
	} __packed bcnft_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.bcnft = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
			.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
			.dtim_period = vif->bss_conf.dtim_period,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return 0;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &bcnft_req, sizeof(bcnft_req), true);
}

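/* Reprogram the BSS power-save/keep-alive state through the CE
 * SET_BSS_ABORT and SET_BSS_CONNECTED commands: the previous state is
 * always aborted first, and the connected state is re-established only
 * when enable is set.
 */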
int
mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
		      bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		u8 bss_idx;
		u8 dtim_period;
		__le16 aid;
		__le16 bcn_interval;
		__le16 atim_window;
		u8 uapsd;
		u8 bmc_delivered_ac;
		u8 bmc_triggered_ac;
		u8 pad;
	} req = {
		.bss_idx = mvif->mt76.idx,
		.aid = cpu_to_le16(vif->cfg.aid),
		.dtim_period = vif->bss_conf.dtim_period,
		.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
	};
	struct {
		u8 bss_idx;
		u8 pad[3];
	} req_hdr = {
		.bss_idx = mvif->mt76.idx,
	};
	int err;

	err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
				&req_hdr, sizeof(req_hdr), false);
	if (err < 0 || !enable)
		return err;

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
				 &req, sizeof(req), false);
}

int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
			  struct ieee80211_vif *vif, bool enable,
			  enum mt76_sta_info_state state)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	int rssi = -ewma_rssi_read(&mvif->rssi);
	struct mt76_sta_cmd_info info = {
		.sta = sta,
		.vif = vif,
		.enable = enable,
		.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
		.state = state,
		.offload_fw = true,
		.rcpi = to_rcpi(rssi),
	};
	struct mt7921_sta *msta;

	msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
	info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
	info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;

	return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
}

int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (!test_bit(MT76_STATE_PM, &mphy->state))
		goto out;

	err = __mt7921_mcu_drv_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_drv_pmctrl);

int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (mt76_connac_skip_fw_pmctrl(mphy, pm))
		goto out;

	err = __mt7921_mcu_fw_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_fw_pmctrl);

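/* Enable or disable firmware beacon filtering for a station vif: program
 * the beacon frame timing (or tear down the BSS PM state) and adjust the
 * RX filter so that beacons from other BSSes are dropped only while the
 * filter is active.
 */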
int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
				 struct ieee80211_vif *vif,
				 bool enable)
{
#define MT7921_FIF_BIT_CLR		BIT(1)
#define MT7921_FIF_BIT_SET		BIT(0)
	int err;

	if (enable) {
		err = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
		if (err)
			return err;

		err = mt7921_mcu_set_rxfilter(dev, 0,
					      MT7921_FIF_BIT_SET,
					      MT_WF_RFCR_DROP_OTHER_BEACON);
		if (err)
			return err;

		return 0;
	}

	err = mt7921_mcu_set_bss_pm(dev, vif, false);
	if (err)
		return err;

	err = mt7921_mcu_set_rxfilter(dev, 0,
				      MT7921_FIF_BIT_CLR,
				      MT_WF_RFCR_DROP_OTHER_BEACON);
	if (err)
		return err;

	return 0;
}

int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
{
	struct mt7921_txpwr_event *event;
	struct mt7921_txpwr_req req = {
		.dbdc_idx = 0,
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	event = (struct mt7921_txpwr_event *)skb->data;
	WARN_ON(skb->len != le16_to_cpu(event->len));
	memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));

	dev_kfree_skb(skb);

	return 0;
}

int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			   bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct sniffer_enable_tlv {
			__le16 tag;
			__le16 len;
			u8 enable;
			u8 pad[3];
		} __packed enable;
	} req = {
		.hdr = {
			.band_idx = mvif->band_idx,
		},
		.enable = {
			.tag = cpu_to_le16(0),
			.len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
			.enable = enable,
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
				 true);
}

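/* Configure the sniffer (monitor mode) channel: translate the chanctx
 * definition into the firmware band/bandwidth encoding and set the
 * secondary-channel offset for 40 MHz operation.
 */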
int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
			      struct ieee80211_chanctx_conf *ctx)
{
	struct cfg80211_chan_def *chandef = &ctx->def;
	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
	const u8 ch_band[] = {
		[NL80211_BAND_2GHZ] = 1,
		[NL80211_BAND_5GHZ] = 2,
		[NL80211_BAND_6GHZ] = 3,
	};
	const u8 ch_width[] = {
		[NL80211_CHAN_WIDTH_20_NOHT] = 0,
		[NL80211_CHAN_WIDTH_20] = 0,
		[NL80211_CHAN_WIDTH_40] = 0,
		[NL80211_CHAN_WIDTH_80] = 1,
		[NL80211_CHAN_WIDTH_160] = 2,
		[NL80211_CHAN_WIDTH_80P80] = 3,
		[NL80211_CHAN_WIDTH_5] = 4,
		[NL80211_CHAN_WIDTH_10] = 5,
		[NL80211_CHAN_WIDTH_320] = 6,
	};
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct config_tlv {
			__le16 tag;
			__le16 len;
			u16 aid;
			u8 ch_band;
			u8 bw;
			u8 control_ch;
			u8 sco;
			u8 center_ch;
			u8 center_ch2;
			u8 drop_err;
			u8 pad[3];
		} __packed tlv;
	} __packed req = {
		.hdr = {
			.band_idx = vif->mt76.band_idx,
		},
		.tlv = {
			.tag = cpu_to_le16(1),
			.len = cpu_to_le16(sizeof(req.tlv)),
			.control_ch = chandef->chan->hw_value,
			.center_ch = ieee80211_frequency_to_channel(freq1),
			.drop_err = 1,
		},
	};

	if (chandef->chan->band < ARRAY_SIZE(ch_band))
		req.tlv.ch_band = ch_band[chandef->chan->band];
	if (chandef->width < ARRAY_SIZE(ch_width))
		req.tlv.bw = ch_width[chandef->width];

	if (freq2)
		req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);

	if (req.tlv.control_ch < req.tlv.center_ch)
		req.tlv.sco = 1; /* SCA */
	else if (req.tlv.control_ch > req.tlv.center_ch)
		req.tlv.sco = 3; /* SCB */

	return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
				 &req, sizeof(req), true);
}

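/* Hand a beacon template to the firmware for offloaded transmission. The
 * template is prefixed with a TXD built by mt76_connac2_mac_write_txwi()
 * and the TIM/CSA IE offsets are adjusted to account for that header.
 * Disabling the offload is handled by the bss stop path instead.
 */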
int
mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
				  struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
	struct ieee80211_mutable_offsets offs;
	struct {
		struct req_hdr {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcn_content_tlv {
			__le16 tag;
			__le16 len;
			__le16 tim_ie_pos;
			__le16 csa_ie_pos;
			__le16 bcc_ie_pos;
			/* 0: disable beacon offload
			 * 1: enable beacon offload
			 * 2: update probe response offload
			 */
			u8 enable;
			/* 0: legacy format (TXD + payload)
			 * 1: only cap field IE
			 */
			u8 type;
			__le16 pkt_len;
			u8 pkt[512];
		} __packed beacon_tlv;
	} req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.beacon_tlv = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
			.len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
			.enable = enable,
		},
	};
	struct sk_buff *skb;

	/* support enable/update process only
	 * disable flow would be handled in bss stop handler automatically
	 */
	if (!enable)
		return -EOPNOTSUPP;

	skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
	if (!skb)
		return -EINVAL;

	if (skb->len > 512 - MT_TXD_SIZE) {
		dev_err(dev->mt76.dev, "beacon size limit exceeded\n");
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	mt76_connac2_mac_write_txwi(&dev->mt76, (__le32 *)(req.beacon_tlv.pkt),
				    skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON);
	memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
	req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
	req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);

	if (offs.cntdwn_counter_offs[0]) {
		u16 csa_offs;

		csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
		req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
	}
	dev_kfree_skb(skb);

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &req, sizeof(req), true);
}

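/* Push one cached CLC blob to the firmware: walk the per-country rules,
 * send every rule matching the requested alpha2 and report -ENOENT when
 * the country is not covered so the caller can fall back to "00".
 */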
static
int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
			 enum environment_cap env_cap,
			 struct mt7921_clc *clc,
			 u8 idx)
{
	struct sk_buff *skb;
	struct {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 idx;
		u8 env;
		u8 acpi_conf;
		u8 pad1;
		u8 alpha2[2];
		u8 type[2];
		u8 rsvd[64];
	} __packed req = {
		.idx = idx,
		.env = env_cap,
		.acpi_conf = mt7921_acpi_get_flags(&dev->phy),
	};
	int ret, valid_cnt = 0;
	u8 i, *pos;

	if (!clc)
		return 0;

	pos = clc->data;
	for (i = 0; i < clc->nr_country; i++) {
		struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
		u16 len = le16_to_cpu(rule->len);

		pos += len + sizeof(*rule);
		if (rule->alpha2[0] != alpha2[0] ||
		    rule->alpha2[1] != alpha2[1])
			continue;

		memcpy(req.alpha2, rule->alpha2, 2);
		memcpy(req.type, rule->type, 2);

		req.len = cpu_to_le16(sizeof(req) + len);
		skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
					   le16_to_cpu(req.len),
					   sizeof(req), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		skb_put_data(skb, rule->data, len);

		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
					    MCU_CE_CMD(SET_CLC), false);
		if (ret < 0)
			return ret;
		valid_cnt++;
	}

	if (!valid_cnt)
		return -ENOENT;

	return 0;
}

int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
		       enum environment_cap env_cap)
{
	struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy;
	int i, ret;

	/* submit all clc config */
	for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
		ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
					   phy->clc[i], i);

		/* If no country found, set "00" as default */
		if (ret == -ENOENT)
			ret = __mt7921_mcu_set_clc(dev, "00",
						   ENVIRON_INDOOR,
						   phy->clc[i], i);
		if (ret < 0)
			return ret;
	}
	return 0;
}

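/* Update the RX filter through the firmware. A non-zero fif programs the
 * full filter word (mode 1), otherwise bit_op/bit_map request a set or
 * clear of individual MT_WF_RFCR bits (mode 2, see MT7921_FIF_BIT_SET and
 * MT7921_FIF_BIT_CLR).
 */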
int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif,
			    u8 bit_op, u32 bit_map)
{
	struct {
		u8 rsv[4];
		u8 mode;
		u8 rsv2[3];
		__le32 fif;
		__le32 bit_map; /* bit_* for bitmap update */
		u8 bit_op;
		u8 pad[51];
	} __packed data = {
		.mode = fif ? 1 : 2,
		.fif = cpu_to_le32(fif),
		.bit_map = cpu_to_le32(bit_map),
		.bit_op = bit_op,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
				 &data, sizeof(data), false);
}