// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/fs.h>
#include <linux/firmware.h>
#include "mt7921.h"
#include "mt7921_trace.h"
#include "eeprom.h"
#include "mcu.h"
#include "mac.h"

#define MT_STA_BFER	BIT(0)
#define MT_STA_BFEE	BIT(1)

static bool mt7921_disable_clc;
module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
MODULE_PARM_DESC(disable_clc, "disable CLC support");

static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_eeprom_info *res;
	u8 *buf;

	if (!skb)
		return -EINVAL;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));

	res = (struct mt7921_mcu_eeprom_info *)skb->data;
	buf = dev->eeprom.data + le32_to_cpu(res->addr);
	memcpy(buf, res->data, 16);

	return 0;
}

int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
			      struct sk_buff *skb, int seq)
{
	int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
	struct mt76_connac2_mcu_rxd *rxd;
	int ret = 0;

	if (!skb) {
		dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
			cmd, seq);
		mt7921_reset(mdev);

		return -ETIMEDOUT;
	}

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	if (seq != rxd->seq)
		return -EAGAIN;

	if (cmd == MCU_CMD(PATCH_SEM_CONTROL) ||
	    cmd == MCU_CMD(PATCH_FINISH_REQ)) {
		skb_pull(skb, sizeof(*rxd) - 4);
		ret = *skb->data;
	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
		skb_pull(skb, sizeof(*rxd) + 4);
		ret = le32_to_cpu(*(__le32 *)skb->data);
	} else if (cmd == MCU_EXT_CMD(EFUSE_ACCESS)) {
		ret = mt7921_mcu_parse_eeprom(mdev, skb);
	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
		   cmd == MCU_UNI_CMD(HIF_CTRL) ||
		   cmd == MCU_UNI_CMD(OFFLOAD) ||
		   cmd == MCU_UNI_CMD(SUSPEND)) {
		struct mt7921_mcu_uni_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt7921_mcu_uni_event *)skb->data;
		ret = le32_to_cpu(event->status);
		/* skip invalid event */
		if (mcu_cmd != event->cid)
			ret = -EAGAIN;
	} else if (cmd == MCU_CE_QUERY(REG_READ)) {
		struct mt7921_mcu_reg_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt7921_mcu_reg_event *)skb->data;
		ret = (int)le32_to_cpu(event->val);
	} else {
		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);

static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val)
{
	struct mt7921_mcu_eeprom_info *res, req = {
		.addr = cpu_to_le32(round_down(offset,
					       MT7921_EEPROM_BLOCK_SIZE)),
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	res = (struct mt7921_mcu_eeprom_info *)skb->data;
	*val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
	dev_kfree_skb(skb);

	return 0;
}
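
/*
 * Illustrative sketch only (not part of the driver flow): callers may pass
 * any byte offset to mt7921_mcu_read_eeprom(); the request is rounded down
 * to an MT7921_EEPROM_BLOCK_SIZE boundary and the wanted byte is picked out
 * of the returned block, e.g. as mt7921_load_clc() does further down:
 *
 *	u8 hw_encap;
 *	int err = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
 *
 *	if (!err)
 *		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
 */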

#ifdef CONFIG_PM

static int
mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
			      struct ieee80211_vif *vif, bool suspend)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_arpns_tlv arpns;
	} req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.arpns = {
			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
			.mode = suspend,
		},
	};

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
				 true);
}

void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (IS_ENABLED(CONFIG_IPV6)) {
		struct mt76_phy *phy = priv;

		mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
					      !test_bit(MT76_STATE_RUNNING,
							&phy->state));
	}

	mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
}

#endif /* CONFIG_PM */

static void
mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_roc_grant_tlv *grant;
	struct mt76_connac2_mcu_rxd *rxd;
	int duration;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);

	/* should never happen */
	WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT));

	if (grant->reqtype == MT7921_ROC_REQ_ROC)
		ieee80211_ready_on_channel(dev->mt76.phy.hw);

	dev->phy.roc_grant = true;
	wake_up(&dev->phy.roc_wait);
	duration = le32_to_cpu(grant->max_interval);
	mod_timer(&dev->phy.roc_timer,
		  jiffies + msecs_to_jiffies(duration));
}

static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;

	spin_lock_bh(&dev->mt76.lock);
	__skb_queue_tail(&phy->scan_event_list, skb);
	spin_unlock_bh(&dev->mt76.lock);

	ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
				     MT7921_HW_SCAN_TIMEOUT);
}

static void
mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_beacon_loss_event *event = priv;

	if (mvif->idx != event->bss_idx)
		return;

	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
	    vif->type != NL80211_IFTYPE_STATION)
		return;

	ieee80211_connection_loss(vif);
}

static void
mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac_beacon_loss_event *event;
	struct mt76_phy *mphy = &dev->mt76.phy;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt76_connac_beacon_loss_event *)skb->data;

	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					mt7921_mcu_connection_loss_iter, event);
}

static void
mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_debug_msg {
		__le16 id;
		u8 type;
		u8 flag;
		__le32 value;
		__le16 len;
		u8 content[512];
	} __packed * msg;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	msg = (struct mt7921_debug_msg *)skb->data;

	if (msg->type == 3) { /* fw log */
		u16 len = min_t(u16, le16_to_cpu(msg->len), 512);
		int i;

		for (i = 0; i < len; i++) {
			if (!msg->content[i])
				msg->content[i] = ' ';
		}
		wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
	}
}

static void
mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_lp_event {
		u8 state;
		u8 reserved[3];
	} __packed * event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_lp_event *)skb->data;

	trace_lp_event(dev, event->state);
}

static void
mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_tx_done_event *event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_tx_done_event *)skb->data;

	mt7921_mac_add_txs(dev, event->txs);
}

static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	switch (rxd->eid) {
	case MCU_EVENT_BSS_BEACON_LOSS:
		mt7921_mcu_connection_loss_event(dev, skb);
		break;
	case MCU_EVENT_SCHED_SCAN_DONE:
	case MCU_EVENT_SCAN_DONE:
		mt7921_mcu_scan_event(dev, skb);
		return;
	case MCU_EVENT_DBG_MSG:
		mt7921_mcu_debug_msg_event(dev, skb);
		break;
	case MCU_EVENT_COREDUMP:
		dev->fw_assert = true;
		mt76_connac_mcu_coredump_event(&dev->mt76, skb,
					       &dev->coredump);
		return;
	case MCU_EVENT_LP_INFO:
		mt7921_mcu_low_power_event(dev, skb);
		break;
	case MCU_EVENT_TX_DONE:
		mt7921_mcu_tx_done_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

static void
mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev,
				    struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	switch (rxd->eid) {
	case MCU_UNI_EVENT_ROC:
		mt7921_mcu_uni_roc_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	if (skb_linearize(skb))
		return;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
		mt7921_mcu_uni_rx_unsolicited_event(dev, skb);
		return;
	}

	if (rxd->eid == 0x6) {
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	}

	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
	    rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
	    rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_TX_DONE ||
	    rxd->eid == MCU_EVENT_DBG_MSG ||
	    rxd->eid == MCU_EVENT_COREDUMP ||
	    rxd->eid == MCU_EVENT_LP_INFO ||
	    !rxd->seq)
		mt7921_mcu_rx_unsolicited_event(dev, skb);
	else
		mt76_mcu_rx_event(&dev->mt76, skb);
}

/** starec & wtbl **/
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	if (enable && !params->amsdu)
		msta->wcid.amsdu = false;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, true);
}

int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, false);
}

static char *mt7921_patch_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_ROM_PATCH;
	else
		ret = MT7921_ROM_PATCH;

	return ret;
}

static char *mt7921_ram_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_FIRMWARE_WM;
	else
		ret = MT7921_FIRMWARE_WM;

	return ret;
}

static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name)
{
	const struct mt76_connac2_fw_trailer *hdr;
	const struct mt76_connac2_fw_region *region;
	const struct mt7921_clc *clc;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7921_phy *phy = &dev->phy;
	const struct firmware *fw;
	int ret, i, len, offset = 0;
	u8 *clc_base = NULL, hw_encap = 0;

	if (mt7921_disable_clc ||
	    mt76_is_usb(&dev->mt76))
		return 0;

	if (mt76_is_mmio(&dev->mt76)) {
		ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
		if (ret)
			return ret;
		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
	}

	ret = request_firmware(&fw, fw_name, mdev->dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		dev_err(mdev->dev, "Invalid firmware\n");
		ret = -EINVAL;
		goto out;
	}

	hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
	for (i = 0; i < hdr->n_region; i++) {
		region = (const void *)((const u8 *)hdr -
					(hdr->n_region - i) * sizeof(*region));
		len = le32_to_cpu(region->len);

		/* check if we have valid buffer size */
		if (offset + len > fw->size) {
			dev_err(mdev->dev, "Invalid firmware region\n");
			ret = -EINVAL;
			goto out;
		}

		if ((region->feature_set & FW_FEATURE_NON_DL) &&
		    region->type == FW_TYPE_CLC) {
			clc_base = (u8 *)(fw->data + offset);
			break;
		}
		offset += len;
	}

	if (!clc_base)
		goto out;

	for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
		clc = (const struct mt7921_clc *)(clc_base + offset);

		/* do not init buf again if chip reset triggered */
		if (phy->clc[clc->idx])
			continue;

		/* header content sanity */
		if (clc->idx == MT7921_CLC_POWER &&
		    u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
			continue;

		phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
						  le32_to_cpu(clc->len),
						  GFP_KERNEL);

		if (!phy->clc[clc->idx]) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
	release_firmware(fw);

	return ret;
}

static int mt7921_load_firmware(struct mt7921_dev *dev)
{
	int ret;

	ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
	if (ret && mt76_is_mmio(&dev->mt76)) {
		dev_dbg(dev->mt76.dev, "Firmware is already downloaded\n");
		goto fw_loaded;
	}

	ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev));
	if (ret)
		return ret;

	if (mt76_is_sdio(&dev->mt76)) {
		/* activate again */
		ret = __mt7921_mcu_fw_pmctrl(dev);
		if (!ret)
			ret = __mt7921_mcu_drv_pmctrl(dev);
	}

	ret = mt76_connac2_load_ram(&dev->mt76, mt7921_ram_name(dev), NULL);
	if (ret)
		return ret;

	if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
			    MT_TOP_MISC2_FW_N9_RDY, 1500)) {
		dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");

		return -EIO;
	}

fw_loaded:

#ifdef CONFIG_PM
	dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
#endif /* CONFIG_PM */

	dev_dbg(dev->mt76.dev, "Firmware init done\n");

	return 0;
}
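
/*
 * Rough bring-up order, as wired together by mt7921_run_firmware() below
 * (summary, not a spec): load the ROM patch and RAM image, query the NIC
 * capability, mark the MCU as running, load the CLC tables and finally
 * redirect firmware logs to the host.
 */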

int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
{
	struct {
		u8 ctrl_val;
		u8 pad[3];
	} data = {
		.ctrl_val = ctrl
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
				 &data, sizeof(data), false);
}

int mt7921_run_firmware(struct mt7921_dev *dev)
{
	int err;

	err = mt7921_load_firmware(dev);
	if (err)
		return err;

	err = mt76_connac_mcu_get_nic_capability(&dev->mphy);
	if (err)
		return err;

	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
	err = mt7921_load_clc(dev, mt7921_ram_name(dev));
	if (err)
		return err;

	return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);

int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct edca {
		__le16 cw_min;
		__le16 cw_max;
		__le16 txop;
		__le16 aifs;
		u8 guardtime;
		u8 acm;
	} __packed;
	struct mt7921_mcu_tx {
		struct edca edca[IEEE80211_NUM_ACS];
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad;
	} __packed req = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	struct mu_edca {
		u8 cw_min;
		u8 cw_max;
		u8 aifsn;
		u8 acm;
		u8 timer;
		u8 padding[3];
	};
	struct mt7921_mcu_mu_tx {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad1;
		struct mu_edca edca[IEEE80211_NUM_ACS];
		u8 pad3[32];
	} __packed req_mu = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	static const int to_aci[] = { 1, 0, 2, 3 };
	int ac, ret;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
		struct edca *e = &req.edca[to_aci[ac]];

		e->aifs = cpu_to_le16(q->aifs);
		e->txop = cpu_to_le16(q->txop);

		if (q->cw_min)
			e->cw_min = cpu_to_le16(q->cw_min);
		else
			e->cw_min = cpu_to_le16(5);

		if (q->cw_max)
			e->cw_max = cpu_to_le16(q->cw_max);
		else
			e->cw_max = cpu_to_le16(10);
	}

	ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
				sizeof(req), false);
	if (ret)
		return ret;

	if (!vif->bss_conf.he_support)
		return 0;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_he_mu_edca_param_ac_rec *q;
		struct mu_edca *e;

		if (!mvif->queue_params[ac].mu_edca)
			break;

		q = &mvif->queue_params[ac].mu_edca_param_rec;
		e = &(req_mu.edca[to_aci[ac]]);

		e->cw_min = q->ecw_min_max & 0xf;
		e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
		e->aifsn = q->aifsn;
		e->timer = q->mu_edca_timer;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
				 &req_mu, sizeof(req_mu), false);
}
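
/*
 * Note on mt7921_mcu_set_tx() above (descriptive comment, not new logic):
 * mvif->queue_params[] is indexed in mac80211 AC order, so to_aci[] remaps
 * each entry into the slot ordering the firmware command expects. For the
 * MU-EDCA record, the ECWmin/ECWmax exponents arrive packed in ecw_min_max,
 * low nibble ECWmin and high nibble ECWmax, as unpacked above.
 */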

int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
		       struct ieee80211_channel *chan, int duration,
		       enum mt7921_roc_req type, u8 token_id)
{
	int center_ch = ieee80211_frequency_to_channel(chan->center_freq);
	struct mt7921_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_acquire_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 control_channel;
			u8 sco;
			u8 band;
			u8 bw;
			u8 center_chan;
			u8 center_chan2;
			u8 bw_from_ap;
			u8 center_chan_from_ap;
			u8 center_chan2_from_ap;
			u8 reqtype;
			__le32 maxinterval;
			u8 dbdcband;
			u8 rsv[3];
		} __packed roc;
	} __packed req = {
		.roc = {
			.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
			.len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
			.tokenid = token_id,
			.reqtype = type,
			.maxinterval = cpu_to_le32(duration),
			.bss_idx = vif->mt76.idx,
			.control_channel = chan->hw_value,
			.bw = CMD_CBW_20MHZ,
			.bw_from_ap = CMD_CBW_20MHZ,
			.center_chan = center_ch,
			.center_chan_from_ap = center_ch,
			.dbdcband = 0xff, /* auto */
		},
	};

	if (chan->hw_value < center_ch)
		req.roc.sco = 1; /* SCA */
	else if (chan->hw_value > center_ch)
		req.roc.sco = 3; /* SCB */

	switch (chan->band) {
	case NL80211_BAND_6GHZ:
		req.roc.band = 3;
		break;
	case NL80211_BAND_5GHZ:
		req.roc.band = 2;
		break;
	default:
		req.roc.band = 1;
		break;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
			 u8 token_id)
{
	struct mt7921_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_abort_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 dbdcband;
			u8 rsv[5];
		} __packed abort;
	} __packed req = {
		.abort = {
			.tag = cpu_to_le16(UNI_ROC_ABORT),
			.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
			.tokenid = token_id,
			.bss_idx = vif->mt76.idx,
			.dbdcband = 0xff, /* auto */
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
{
	struct mt7921_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq1 = chandef->center_freq1;
	struct {
		u8 control_ch;
		u8 center_ch;
		u8 bw;
		u8 tx_streams_num;
		u8 rx_streams;	/* mask or num */
		u8 switch_reason;
		u8 band_idx;
		u8 center_ch2;	/* for 80+80 only */
		__le16 cac_case;
		u8 channel_band;
		u8 rsv0;
		__le32 outband_freq;
		u8 txpower_drop;
		u8 ap_bw;
		u8 ap_center_ch;
		u8 rsv1[57];
	} __packed req = {
		.control_ch = chandef->chan->hw_value,
		.center_ch = ieee80211_frequency_to_channel(freq1),
		.bw = mt76_connac_chan_bw(chandef),
		.tx_streams_num = hweight8(phy->mt76->antenna_mask),
		.rx_streams = phy->mt76->antenna_mask,
		.band_idx = phy != &dev->phy,
	};

	if (chandef->chan->band == NL80211_BAND_6GHZ)
		req.channel_band = 2;
	else
		req.channel_band = chandef->chan->band;

	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
		req.switch_reason = CH_SWITCH_NORMAL;
	else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
					  NL80211_IFTYPE_AP))
		req.switch_reason = CH_SWITCH_DFS;
	else
		req.switch_reason = CH_SWITCH_NORMAL;

	if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
		req.rx_streams = hweight8(req.rx_streams);

	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;

		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
	}

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
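
/*
 * Note on mt7921_mcu_set_chan_info() above: .rx_streams carries the antenna
 * mask by default and is only converted to a stream count for
 * MCU_EXT_CMD(CHANNEL_SWITCH), matching the "mask or num" field comment.
 * CH_SWITCH_SCAN_BYPASS_DPD is used for off-channel configurations, which
 * presumably lets the firmware skip DPD calibration during scans.
 */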

int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
{
	struct req_hdr {
		u8 buffer_mode;
		u8 format;
		__le16 len;
	} __packed req = {
		.buffer_mode = EE_MODE_EFUSE,
		.format = EE_FORMAT_WHOLE,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
				 &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);

int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct ps_tlv {
			__le16 tag;
			__le16 len;
			u8 ps_state; /* 0: device awake
				      * 1: static power save
				      * 2: dynamic power saving
				      * 3: enter TWT power saving
				      * 4: leave TWT power saving
				      */
			u8 pad[3];
		} __packed ps;
	} __packed ps_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.ps = {
			.tag = cpu_to_le16(UNI_BSS_INFO_PS),
			.len = cpu_to_le16(sizeof(struct ps_tlv)),
			.ps_state = vif->cfg.ps ? 2 : 0,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &ps_req, sizeof(ps_req), true);
}

static int
mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			 bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcnft_tlv {
			__le16 tag;
			__le16 len;
			__le16 bcn_interval;
			u8 dtim_period;
			u8 pad;
		} __packed bcnft;
	} __packed bcnft_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.bcnft = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
			.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
			.dtim_period = vif->bss_conf.dtim_period,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return 0;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &bcnft_req, sizeof(bcnft_req), true);
}

int
mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
		      bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		u8 bss_idx;
		u8 dtim_period;
		__le16 aid;
		__le16 bcn_interval;
		__le16 atim_window;
		u8 uapsd;
		u8 bmc_delivered_ac;
		u8 bmc_triggered_ac;
		u8 pad;
	} req = {
		.bss_idx = mvif->mt76.idx,
		.aid = cpu_to_le16(vif->cfg.aid),
		.dtim_period = vif->bss_conf.dtim_period,
		.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
	};
	struct {
		u8 bss_idx;
		u8 pad[3];
	} req_hdr = {
		.bss_idx = mvif->mt76.idx,
	};
	int err;

	err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
				&req_hdr, sizeof(req_hdr), false);
	if (err < 0 || !enable)
		return err;

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
				 &req, sizeof(req), false);
}

int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
			  struct ieee80211_vif *vif, bool enable,
			  enum mt76_sta_info_state state)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	int rssi = -ewma_rssi_read(&mvif->rssi);
	struct mt76_sta_cmd_info info = {
		.sta = sta,
		.vif = vif,
		.enable = enable,
		.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
		.state = state,
		.offload_fw = true,
		.rcpi = to_rcpi(rssi),
	};
	struct mt7921_sta *msta;

	msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
	info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
	info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;

	return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
}

int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (!test_bit(MT76_STATE_PM, &mphy->state))
		goto out;

	err = __mt7921_mcu_drv_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_drv_pmctrl);

int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (mt76_connac_skip_fw_pmctrl(mphy, pm))
		goto out;

	err = __mt7921_mcu_fw_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_fw_pmctrl);

int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
				 struct ieee80211_vif *vif,
				 bool enable)
{
#define MT7921_FIF_BIT_CLR	BIT(1)
#define MT7921_FIF_BIT_SET	BIT(0)
	int err;

	if (enable) {
		err = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
		if (err)
			return err;

		err = mt7921_mcu_set_rxfilter(dev, 0,
					      MT7921_FIF_BIT_SET,
					      MT_WF_RFCR_DROP_OTHER_BEACON);
		if (err)
			return err;

		return 0;
	}

	err = mt7921_mcu_set_bss_pm(dev, vif, false);
	if (err)
		return err;

	err = mt7921_mcu_set_rxfilter(dev, 0,
				      MT7921_FIF_BIT_CLR,
				      MT_WF_RFCR_DROP_OTHER_BEACON);
	if (err)
		return err;

	return 0;
}

int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
{
	struct mt7921_txpwr_event *event;
	struct mt7921_txpwr_req req = {
		.dbdc_idx = 0,
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	event = (struct mt7921_txpwr_event *)skb->data;
	WARN_ON(skb->len != le16_to_cpu(event->len));
	memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));

	dev_kfree_skb(skb);

	return 0;
}

int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			   bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct sniffer_enable_tlv {
			__le16 tag;
			__le16 len;
			u8 enable;
			u8 pad[3];
		} __packed enable;
	} req = {
		.hdr = {
			.band_idx = mvif->band_idx,
		},
		.enable = {
			.tag = cpu_to_le16(0),
			.len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
			.enable = enable,
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
				 true);
}
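
/*
 * Both sniffer helpers target MCU_UNI_CMD(SNIFFER) with different TLV tags:
 * tag 0 above toggles capture, tag 1 below describes the monitored channel.
 * A monitor-mode flow would presumably enable the sniffer first and then
 * push the channel description via mt7921_mcu_config_sniffer().
 */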

int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
			      struct ieee80211_chanctx_conf *ctx)
{
	struct cfg80211_chan_def *chandef = &ctx->def;
	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
	const u8 ch_band[] = {
		[NL80211_BAND_2GHZ] = 1,
		[NL80211_BAND_5GHZ] = 2,
		[NL80211_BAND_6GHZ] = 3,
	};
	const u8 ch_width[] = {
		[NL80211_CHAN_WIDTH_20_NOHT] = 0,
		[NL80211_CHAN_WIDTH_20] = 0,
		[NL80211_CHAN_WIDTH_40] = 0,
		[NL80211_CHAN_WIDTH_80] = 1,
		[NL80211_CHAN_WIDTH_160] = 2,
		[NL80211_CHAN_WIDTH_80P80] = 3,
		[NL80211_CHAN_WIDTH_5] = 4,
		[NL80211_CHAN_WIDTH_10] = 5,
		[NL80211_CHAN_WIDTH_320] = 6,
	};
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct config_tlv {
			__le16 tag;
			__le16 len;
			u16 aid;
			u8 ch_band;
			u8 bw;
			u8 control_ch;
			u8 sco;
			u8 center_ch;
			u8 center_ch2;
			u8 drop_err;
			u8 pad[3];
		} __packed tlv;
	} __packed req = {
		.hdr = {
			.band_idx = vif->mt76.band_idx,
		},
		.tlv = {
			.tag = cpu_to_le16(1),
			.len = cpu_to_le16(sizeof(req.tlv)),
			.control_ch = chandef->chan->hw_value,
			.center_ch = ieee80211_frequency_to_channel(freq1),
			.drop_err = 1,
		},
	};

	if (chandef->chan->band < ARRAY_SIZE(ch_band))
		req.tlv.ch_band = ch_band[chandef->chan->band];
	if (chandef->width < ARRAY_SIZE(ch_width))
		req.tlv.bw = ch_width[chandef->width];

	if (freq2)
		req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);

	if (req.tlv.control_ch < req.tlv.center_ch)
		req.tlv.sco = 1; /* SCA */
	else if (req.tlv.control_ch > req.tlv.center_ch)
		req.tlv.sco = 3; /* SCB */

	return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
				 &req, sizeof(req), true);
}
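
/*
 * The offload below only handles the enable/update path; the beacon template
 * (TXD plus frame) must fit into the 512-byte pkt[] buffer, which is why the
 * template length is checked against 512 - MT_TXD_SIZE before it is copied.
 */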

int
mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
				  struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
	struct ieee80211_mutable_offsets offs;
	struct {
		struct req_hdr {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcn_content_tlv {
			__le16 tag;
			__le16 len;
			__le16 tim_ie_pos;
			__le16 csa_ie_pos;
			__le16 bcc_ie_pos;
			/* 0: disable beacon offload
			 * 1: enable beacon offload
			 * 2: update probe response offload
			 */
			u8 enable;
			/* 0: legacy format (TXD + payload)
			 * 1: only cap field IE
			 */
			u8 type;
			__le16 pkt_len;
			u8 pkt[512];
		} __packed beacon_tlv;
	} req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.beacon_tlv = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
			.len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
			.enable = enable,
		},
	};
	struct sk_buff *skb;

	/* support enable/update process only
	 * disable flow would be handled in bss stop handler automatically
	 */
	if (!enable)
		return -EOPNOTSUPP;

	skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
	if (!skb)
		return -EINVAL;

	if (skb->len > 512 - MT_TXD_SIZE) {
		dev_err(dev->mt76.dev, "beacon size limit exceeded\n");
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	mt76_connac2_mac_write_txwi(&dev->mt76, (__le32 *)(req.beacon_tlv.pkt),
				    skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON);
	memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
	req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
	req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);

	if (offs.cntdwn_counter_offs[0]) {
		u16 csa_offs;

		csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
		req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
	}
	dev_kfree_skb(skb);

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &req, sizeof(req), true);
}

static
int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
			 enum environment_cap env_cap,
			 struct mt7921_clc *clc,
			 u8 idx)
{
	struct sk_buff *skb;
	struct {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 idx;
		u8 env;
		u8 acpi_conf;
		u8 pad1;
		u8 alpha2[2];
		u8 type[2];
		u8 rsvd[64];
	} __packed req = {
		.idx = idx,
		.env = env_cap,
		.acpi_conf = mt7921_acpi_get_flags(&dev->phy),
	};
	int ret, valid_cnt = 0;
	u8 i, *pos;

	if (!clc)
		return 0;

	pos = clc->data;
	for (i = 0; i < clc->nr_country; i++) {
		struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
		u16 len = le16_to_cpu(rule->len);

		pos += len + sizeof(*rule);
		if (rule->alpha2[0] != alpha2[0] ||
		    rule->alpha2[1] != alpha2[1])
			continue;

		memcpy(req.alpha2, rule->alpha2, 2);
		memcpy(req.type, rule->type, 2);

		req.len = cpu_to_le16(sizeof(req) + len);
		skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
					   le16_to_cpu(req.len),
					   sizeof(req), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		skb_put_data(skb, rule->data, len);

		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
					    MCU_CE_CMD(SET_CLC), false);
		if (ret < 0)
			return ret;
		valid_cnt++;
	}

	if (!valid_cnt)
		return -ENOENT;

	return 0;
}

int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
		       enum environment_cap env_cap)
{
	struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy;
	int i, ret;

	/* submit all clc config */
	for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
		ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
					   phy->clc[i], i);

		/* If no country found, set "00" as default */
		if (ret == -ENOENT)
			ret = __mt7921_mcu_set_clc(dev, "00",
						   ENVIRON_INDOOR,
						   phy->clc[i], i);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif,
			    u8 bit_op, u32 bit_map)
{
	struct {
		u8 rsv[4];
		u8 mode;
		u8 rsv2[3];
		__le32 fif;
		__le32 bit_map; /* bit_* for bitmap update */
		u8 bit_op;
		u8 pad[51];
	} __packed data = {
		.mode = fif ? 1 : 2,
		.fif = cpu_to_le32(fif),
		.bit_map = cpu_to_le32(bit_map),
		.bit_op = bit_op,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
				 &data, sizeof(data), false);
}
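
/*
 * Usage sketch (mirroring mt7921_mcu_set_beacon_filter() above, illustrative
 * only): passing a zero fif selects the bitmap-update mode (.mode = 2), with
 * bit_op choosing between MT7921_FIF_BIT_SET and MT7921_FIF_BIT_CLR for the
 * bits in bit_map, e.g.:
 *
 *	mt7921_mcu_set_rxfilter(dev, 0, MT7921_FIF_BIT_SET,
 *				MT_WF_RFCR_DROP_OTHER_BEACON);
 */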