// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/firmware.h>
#include <linux/fs.h>
#include "mt7921.h"
#include "mt7921_trace.h"
#include "mcu.h"
#include "mac.h"

struct mt7921_patch_hdr {
	char build_date[16];
	char platform[4];
	__be32 hw_sw_ver;
	__be32 patch_ver;
	__be16 checksum;
	u16 reserved;
	struct {
		__be32 patch_ver;
		__be32 subsys;
		__be32 feature;
		__be32 n_region;
		__be32 crc;
		u32 reserved[11];
	} desc;
} __packed;

struct mt7921_patch_sec {
	__be32 type;
	__be32 offs;
	__be32 size;
	union {
		__be32 spec[13];
		struct {
			__be32 addr;
			__be32 len;
			__be32 sec_key_idx;
			__be32 align_len;
			u32 reserved[9];
		} info;
	};
} __packed;

struct mt7921_fw_trailer {
	u8 chip_id;
	u8 eco_code;
	u8 n_region;
	u8 format_ver;
	u8 format_flag;
	u8 reserved[2];
	char fw_ver[10];
	char build_date[15];
	u32 crc;
} __packed;

struct mt7921_fw_region {
	__le32 decomp_crc;
	__le32 decomp_len;
	__le32 decomp_blk_sz;
	u8 reserved[4];
	__le32 addr;
	__le32 len;
	u8 feature_set;
	u8 reserved1[15];
} __packed;

#define MT_STA_BFER			BIT(0)
#define MT_STA_BFEE			BIT(1)

#define PATCH_SEC_ENC_TYPE_MASK			GENMASK(31, 24)
#define PATCH_SEC_ENC_TYPE_PLAIN		0x00
#define PATCH_SEC_ENC_TYPE_AES			0x01
#define PATCH_SEC_ENC_TYPE_SCRAMBLE		0x02
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK	GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK		GENMASK(7, 0)

static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_eeprom_info *res;
	u8 *buf;

	if (!skb)
		return -EINVAL;

	skb_pull(skb, sizeof(struct mt7921_mcu_rxd));

	res = (struct mt7921_mcu_eeprom_info *)skb->data;
	buf = dev->eeprom.data + le32_to_cpu(res->addr);
	memcpy(buf, res->data, 16);

	return 0;
}

int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
			      struct sk_buff *skb, int seq)
{
	int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
	struct mt7921_mcu_rxd *rxd;
	int ret = 0;

	if (!skb) {
		dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
			cmd, seq);
		mt7921_reset(mdev);

		return -ETIMEDOUT;
	}

	rxd = (struct mt7921_mcu_rxd *)skb->data;
	if (seq != rxd->seq)
		return -EAGAIN;

	if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
		skb_pull(skb, sizeof(*rxd) - 4);
		ret = *skb->data;
	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
		skb_pull(skb, sizeof(*rxd) + 4);
		ret = le32_to_cpu(*(__le32 *)skb->data);
	} else if (cmd == MCU_EXT_CMD(EFUSE_ACCESS)) {
		ret = mt7921_mcu_parse_eeprom(mdev, skb);
	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
		   cmd == MCU_UNI_CMD(HIF_CTRL) ||
		   cmd == MCU_UNI_CMD(OFFLOAD) ||
		   cmd == MCU_UNI_CMD(SUSPEND)) {
		struct mt7921_mcu_uni_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt7921_mcu_uni_event *)skb->data;
		ret = le32_to_cpu(event->status);
		/* skip invalid event */
		if (mcu_cmd != event->cid)
			ret = -EAGAIN;
	} else if (cmd == MCU_CE_QUERY(REG_READ)) {
		struct mt7921_mcu_reg_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt7921_mcu_reg_event *)skb->data;
		ret = (int)le32_to_cpu(event->val);
	} else {
		skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);

int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
			    int cmd, int *wait_seq)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
	struct mt7921_uni_txd *uni_txd;
	struct mt7921_mcu_txd *mcu_txd;
	__le32 *txd;
	u32 val;
	u8 seq;

	if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
	    cmd == MCU_UNI_CMD(SUSPEND) ||
	    cmd == MCU_UNI_CMD(OFFLOAD))
		mdev->mcu.timeout = HZ;
	else
		mdev->mcu.timeout = 3 * HZ;

	seq = ++dev->mt76.mcu.msg_seq & 0xf;
	if (!seq)
		seq = ++dev->mt76.mcu.msg_seq & 0xf;

	if (cmd == MCU_CMD(FW_SCATTER))
		goto exit;

	txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
	txd = (__le32 *)skb_push(skb, txd_len);

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
	      FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
	txd[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
	txd[1] = cpu_to_le32(val);

	if (cmd & __MCU_CMD_FIELD_UNI) {
		uni_txd = (struct mt7921_uni_txd *)txd;
		uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
		uni_txd->option = MCU_CMD_UNI_EXT_ACK;
		uni_txd->cid = cpu_to_le16(mcu_cmd);
		uni_txd->s2d_index = MCU_S2D_H2N;
		uni_txd->pkt_type = MCU_PKT_ID;
		uni_txd->seq = seq;

		goto exit;
	}

	mcu_txd = (struct mt7921_mcu_txd *)txd;
	mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
	mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
					       MT_TX_MCU_PORT_RX_Q0));
	mcu_txd->pkt_type = MCU_PKT_ID;
	mcu_txd->seq = seq;
	mcu_txd->cid = mcu_cmd;
	mcu_txd->s2d_index = MCU_S2D_H2N;
	mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);

	if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
		if (cmd & __MCU_CMD_FIELD_QUERY)
			mcu_txd->set_query = MCU_Q_QUERY;
		else
			mcu_txd->set_query = MCU_Q_SET;
		mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
	} else {
		mcu_txd->set_query = MCU_Q_NA;
	}

exit:
	if (wait_seq)
		*wait_seq = seq;

	return 0;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message);

static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;

	spin_lock_bh(&dev->mt76.lock);
	__skb_queue_tail(&phy->scan_event_list, skb);
	spin_unlock_bh(&dev->mt76.lock);

	ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
				     MT7921_HW_SCAN_TIMEOUT);
}

static void
mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_beacon_loss_event *event = priv;

	if (mvif->idx != event->bss_idx)
		return;

	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
		return;

	ieee80211_connection_loss(vif);
}

static void
mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac_beacon_loss_event *event;
	struct mt76_phy *mphy = &dev->mt76.phy;

	skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
	event = (struct mt76_connac_beacon_loss_event *)skb->data;

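	/* report the loss on each active interface whose BSS index matches */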
	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					mt7921_mcu_connection_loss_iter, event);
}

static void
mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_mcu_bss_event *event;

	skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
	event = (struct mt76_connac_mcu_bss_event *)skb->data;
	if (event->is_absent)
		ieee80211_stop_queues(mphy->hw);
	else
		ieee80211_wake_queues(mphy->hw);
}

static void
mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_debug_msg {
		__le16 id;
		u8 type;
		u8 flag;
		__le32 value;
		__le16 len;
		u8 content[512];
	} __packed * msg;

	skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
	msg = (struct mt7921_debug_msg *)skb->data;

	if (msg->type == 3) { /* fw log */
		u16 len = min_t(u16, le16_to_cpu(msg->len), 512);
		int i;

		for (i = 0 ; i < len; i++) {
			if (!msg->content[i])
				msg->content[i] = ' ';
		}
		wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
	}
}

static void
mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_lp_event {
		u8 state;
		u8 reserved[3];
	} __packed * event;

	skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
	event = (struct mt7921_mcu_lp_event *)skb->data;

	trace_lp_event(dev, event->state);
}

static void
mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_tx_done_event *event;

	skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
	event = (struct mt7921_mcu_tx_done_event *)skb->data;

	mt7921_mac_add_txs(dev, event->txs);
}

static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;

	switch (rxd->eid) {
	case MCU_EVENT_BSS_BEACON_LOSS:
		mt7921_mcu_connection_loss_event(dev, skb);
		break;
	case MCU_EVENT_SCHED_SCAN_DONE:
	case MCU_EVENT_SCAN_DONE:
		mt7921_mcu_scan_event(dev, skb);
		return;
	case MCU_EVENT_BSS_ABSENCE:
		mt7921_mcu_bss_event(dev, skb);
		break;
	case MCU_EVENT_DBG_MSG:
		mt7921_mcu_debug_msg_event(dev, skb);
		break;
	case MCU_EVENT_COREDUMP:
		dev->fw_assert = true;
		mt76_connac_mcu_coredump_event(&dev->mt76, skb,
					       &dev->coredump);
		return;
	case MCU_EVENT_LP_INFO:
		mt7921_mcu_low_power_event(dev, skb);
		break;
	case MCU_EVENT_TX_DONE:
		mt7921_mcu_tx_done_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_rxd *rxd;

	if (skb_linearize(skb))
		return;

	rxd = (struct mt7921_mcu_rxd *)skb->data;

	if (rxd->eid == 0x6) {
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	}

	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
	    rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
	    rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_BSS_ABSENCE ||
	    rxd->eid == MCU_EVENT_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_TX_DONE ||
	    rxd->eid == MCU_EVENT_DBG_MSG ||
	    rxd->eid == MCU_EVENT_COREDUMP ||
	    rxd->eid == MCU_EVENT_LP_INFO ||
	    !rxd->seq)
		mt7921_mcu_rx_unsolicited_event(dev, skb);
	else
		mt76_mcu_rx_event(&dev->mt76, skb);
}

/** starec & wtbl **/
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	if (enable && !params->amsdu)
		msta->wcid.amsdu = false;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, true);
}

int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, false);
}

static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
{
	u32 mode = DL_MODE_NEED_RSP;

	if (info == PATCH_SEC_NOT_SUPPORT)
		return mode;

	switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
	case PATCH_SEC_ENC_TYPE_PLAIN:
		break;
	case PATCH_SEC_ENC_TYPE_AES:
		mode |= DL_MODE_ENCRYPT;
		mode |= FIELD_PREP(DL_MODE_KEY_IDX,
				(info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX;
		mode |= DL_MODE_RESET_SEC_IV;
		break;
	case PATCH_SEC_ENC_TYPE_SCRAMBLE:
		mode |= DL_MODE_ENCRYPT;
		mode |= DL_CONFIG_ENCRY_MODE_SEL;
		mode |= DL_MODE_RESET_SEC_IV;
		break;
	default:
		dev_err(dev->mt76.dev, "Encryption type not supported!\n");
	}

	return mode;
}

static char *mt7921_patch_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_ROM_PATCH;
	else
		ret = MT7921_ROM_PATCH;

	return ret;
}

static int mt7921_load_patch(struct mt7921_dev *dev)
{
	const struct mt7921_patch_hdr *hdr;
	const struct firmware *fw = NULL;
	int i, ret, sem, max_len;

	max_len = mt76_is_sdio(&dev->mt76) ?
		  2048 : 4096;

	sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
	switch (sem) {
	case PATCH_IS_DL:
		return 0;
	case PATCH_NOT_DL_SEM_SUCCESS:
		break;
	default:
		dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
		return -EAGAIN;
	}

	ret = request_firmware(&fw, mt7921_patch_name(dev), dev->mt76.dev);
	if (ret)
		goto out;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		dev_err(dev->mt76.dev, "Invalid firmware\n");
		ret = -EINVAL;
		goto out;
	}

	hdr = (const struct mt7921_patch_hdr *)(fw->data);

	dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
		 be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);

	for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
		struct mt7921_patch_sec *sec;
		const u8 *dl;
		u32 len, addr, mode;
		u32 sec_info = 0;

		sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) +
						  i * sizeof(*sec));
		if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
		    PATCH_SEC_TYPE_INFO) {
			ret = -EINVAL;
			goto out;
		}

		addr = be32_to_cpu(sec->info.addr);
		len = be32_to_cpu(sec->info.len);
		dl = fw->data + be32_to_cpu(sec->offs);
		sec_info = be32_to_cpu(sec->info.sec_key_idx);
		mode = mt7921_get_data_mode(dev, sec_info);

		ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
						    mode);
		if (ret) {
			dev_err(dev->mt76.dev, "Download request failed\n");
			goto out;
		}

		ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
					       dl, len, max_len);
		if (ret) {
			dev_err(dev->mt76.dev, "Failed to send patch\n");
			goto out;
		}
	}

	ret = mt76_connac_mcu_start_patch(&dev->mt76);
	if (ret)
		dev_err(dev->mt76.dev, "Failed to start patch\n");

	if (mt76_is_sdio(&dev->mt76)) {
		/* activate again */
		ret = __mt7921_mcu_fw_pmctrl(dev);
		if (!ret)
			ret = __mt7921_mcu_drv_pmctrl(dev);
	}

out:
	sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
	switch (sem) {
	case PATCH_REL_SEM_SUCCESS:
		break;
	default:
		ret = -EAGAIN;
		dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
		break;
	}
	release_firmware(fw);

	return ret;
}

static int
mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
			     const struct mt7921_fw_trailer *hdr,
			     const u8 *data, bool is_wa)
{
	int i, offset = 0, max_len;
	u32 override = 0, option = 0;

	max_len = mt76_is_sdio(&dev->mt76) ?
		  2048 : 4096;

	for (i = 0; i < hdr->n_region; i++) {
		const struct mt7921_fw_region *region;
		int err;
		u32 len, addr, mode;

		region = (const struct mt7921_fw_region *)((const u8 *)hdr -
			 (hdr->n_region - i) * sizeof(*region));
		mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
						   region->feature_set, is_wa);
		len = le32_to_cpu(region->len);
		addr = le32_to_cpu(region->addr);

		if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
			override = addr;

		err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
						    mode);
		if (err) {
			dev_err(dev->mt76.dev, "Download request failed\n");
			return err;
		}

		err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
					       data + offset, len, max_len);
		if (err) {
			dev_err(dev->mt76.dev, "Failed to send firmware.\n");
			return err;
		}

		offset += len;
	}

	if (override)
		option |= FW_START_OVERRIDE;

	if (is_wa)
		option |= FW_START_WORKING_PDA_CR4;

	return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
}

static char *mt7921_ram_name(struct mt7921_dev *dev)
{
	char *ret;

	if (is_mt7922(&dev->mt76))
		ret = MT7922_FIRMWARE_WM;
	else
		ret = MT7921_FIRMWARE_WM;

	return ret;
}

static int mt7921_load_ram(struct mt7921_dev *dev)
{
	const struct mt7921_fw_trailer *hdr;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, mt7921_ram_name(dev), dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		dev_err(dev->mt76.dev, "Invalid firmware\n");
		ret = -EINVAL;
		goto out;
	}

	hdr = (const struct mt7921_fw_trailer *)(fw->data + fw->size -
						 sizeof(*hdr));

	dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
		 hdr->fw_ver, hdr->build_date);

	ret = mt7921_mcu_send_ram_firmware(dev, hdr, fw->data, false);
	if (ret) {
		dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
		goto out;
	}

	snprintf(dev->mt76.hw->wiphy->fw_version,
		 sizeof(dev->mt76.hw->wiphy->fw_version),
		 "%.10s-%.15s", hdr->fw_ver, hdr->build_date);

out:
	release_firmware(fw);

	return ret;
}

static int mt7921_load_firmware(struct mt7921_dev *dev)
{
	int ret;

	ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
	if (ret && mt76_is_mmio(&dev->mt76)) {
		dev_dbg(dev->mt76.dev, "Firmware is already downloaded\n");
		goto fw_loaded;
	}

	ret = mt7921_load_patch(dev);
	if (ret)
		return ret;

	ret = mt7921_load_ram(dev);
	if (ret)
		return ret;

	if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
			    MT_TOP_MISC2_FW_N9_RDY, 1500)) {
		dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");

		return -EIO;
	}

fw_loaded:

#ifdef CONFIG_PM
	dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
#endif /* CONFIG_PM */

	dev_dbg(dev->mt76.dev, "Firmware init done\n");

	return 0;
}

int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
{
	struct {
		u8 ctrl_val;
		u8 pad[3];
	} data = {
		.ctrl_val = ctrl
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
				 &data, sizeof(data), false);
}

int mt7921_run_firmware(struct mt7921_dev *dev)
{
	int err;

	err = mt7921_load_firmware(dev);
	if (err)
		return err;

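	/* firmware is up: query NIC capabilities before marking the MCU as running */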
	err = mt76_connac_mcu_get_nic_capability(&dev->mphy);
	if (err)
		return err;

	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
	return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);

void mt7921_mcu_exit(struct mt7921_dev *dev)
{
	skb_queue_purge(&dev->mt76.mcu.res_q);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_exit);

int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct edca {
		__le16 cw_min;
		__le16 cw_max;
		__le16 txop;
		__le16 aifs;
		u8 guardtime;
		u8 acm;
	} __packed;
	struct mt7921_mcu_tx {
		struct edca edca[IEEE80211_NUM_ACS];
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad;
	} __packed req = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	struct mu_edca {
		u8 cw_min;
		u8 cw_max;
		u8 aifsn;
		u8 acm;
		u8 timer;
		u8 padding[3];
	};
	struct mt7921_mcu_mu_tx {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad1;
		struct mu_edca edca[IEEE80211_NUM_ACS];
		u8 pad3[32];
	} __packed req_mu = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
	static const int to_aci[] = { 1, 0, 2, 3 };
	int ac, ret;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
		struct edca *e = &req.edca[to_aci[ac]];

		e->aifs = cpu_to_le16(q->aifs);
		e->txop = cpu_to_le16(q->txop);

		if (q->cw_min)
			e->cw_min = cpu_to_le16(q->cw_min);
		else
			e->cw_min = cpu_to_le16(5);

		if (q->cw_max)
			e->cw_max = cpu_to_le16(q->cw_max);
		else
			e->cw_max = cpu_to_le16(10);
	}

	ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
				sizeof(req), false);
	if (ret)
		return ret;

	if (!vif->bss_conf.he_support)
		return 0;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_he_mu_edca_param_ac_rec *q;
		struct mu_edca *e;

		if (!mvif->queue_params[ac].mu_edca)
			break;

		q = &mvif->queue_params[ac].mu_edca_param_rec;
		e = &(req_mu.edca[to_aci[ac]]);

		e->cw_min = q->ecw_min_max & 0xf;
		e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
		e->aifsn = q->aifsn;
		e->timer = q->mu_edca_timer;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
				 &req_mu, sizeof(req_mu), false);
}

int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
{
	struct mt7921_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq1 = chandef->center_freq1;
	struct {
		u8 control_ch;
		u8 center_ch;
		u8 bw;
		u8 tx_streams_num;
		u8 rx_streams;	/* mask or num */
		u8 switch_reason;
		u8 band_idx;
		u8 center_ch2;	/* for 80+80 only */
		__le16 cac_case;
		u8 channel_band;
		u8 rsv0;
		__le32 outband_freq;
		u8 txpower_drop;
		u8 ap_bw;
		u8 ap_center_ch;
		u8 rsv1[57];
	} __packed req = {
		.control_ch = chandef->chan->hw_value,
		.center_ch = ieee80211_frequency_to_channel(freq1),
		.bw = mt76_connac_chan_bw(chandef),
		.tx_streams_num = hweight8(phy->mt76->antenna_mask),
		.rx_streams = phy->mt76->antenna_mask,
		.band_idx = phy != &dev->phy,
	};

	if (chandef->chan->band == NL80211_BAND_6GHZ)
		req.channel_band = 2;
	else
		req.channel_band = chandef->chan->band;

	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
		req.switch_reason = CH_SWITCH_NORMAL;
	else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
					  NL80211_IFTYPE_AP))
		req.switch_reason = CH_SWITCH_DFS;
	else
		req.switch_reason = CH_SWITCH_NORMAL;

	if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
		req.rx_streams = hweight8(req.rx_streams);

	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;

		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
	}

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}

int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
{
	struct req_hdr {
		u8 buffer_mode;
		u8 format;
		__le16 len;
	} __packed req = {
		.buffer_mode = EE_MODE_EFUSE,
		.format = EE_FORMAT_WHOLE,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
				 &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);

int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct ps_tlv {
			__le16 tag;
			__le16 len;
			u8 ps_state; /* 0: device awake
				      * 1: static power save
				      * 2: dynamic power saving
				      * 3: enter TWT power saving
				      * 4: leave TWT power saving
				      */
			u8 pad[3];
		} __packed ps;
	} __packed ps_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.ps = {
			.tag = cpu_to_le16(UNI_BSS_INFO_PS),
			.len = cpu_to_le16(sizeof(struct ps_tlv)),
			.ps_state = vif->bss_conf.ps ?
				    2 : 0,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &ps_req, sizeof(ps_req), true);
}

static int
mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			 bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcnft_tlv {
			__le16 tag;
			__le16 len;
			__le16 bcn_interval;
			u8 dtim_period;
			u8 pad;
		} __packed bcnft;
	} __packed bcnft_req = {
		.hdr = {
			.bss_idx = mvif->mt76.idx,
		},
		.bcnft = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
			.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
			.dtim_period = vif->bss_conf.dtim_period,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return 0;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &bcnft_req, sizeof(bcnft_req), true);
}

static int
mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
		      bool enable)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct {
		u8 bss_idx;
		u8 dtim_period;
		__le16 aid;
		__le16 bcn_interval;
		__le16 atim_window;
		u8 uapsd;
		u8 bmc_delivered_ac;
		u8 bmc_triggered_ac;
		u8 pad;
	} req = {
		.bss_idx = mvif->mt76.idx,
		.aid = cpu_to_le16(vif->bss_conf.aid),
		.dtim_period = vif->bss_conf.dtim_period,
		.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
	};
	struct {
		u8 bss_idx;
		u8 pad[3];
	} req_hdr = {
		.bss_idx = mvif->mt76.idx,
	};
	int err;

	if (vif->type != NL80211_IFTYPE_STATION)
		return 0;

	err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
				&req_hdr, sizeof(req_hdr), false);
	if (err < 0 || !enable)
		return err;

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
				 &req, sizeof(req), false);
}

int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
			  struct ieee80211_vif *vif, bool enable,
			  enum mt76_sta_info_state state)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	int rssi = -ewma_rssi_read(&mvif->rssi);
	struct mt76_sta_cmd_info info = {
		.sta = sta,
		.vif = vif,
		.enable = enable,
		.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
		.state = state,
		.offload_fw = true,
		.rcpi = to_rcpi(rssi),
	};
	struct mt7921_sta *msta;

	msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
	info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
	info.newly = msta ?
		     state != MT76_STA_INFO_STATE_ASSOC : true;

	return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
}

int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (!test_bit(MT76_STATE_PM, &mphy->state))
		goto out;

	err = __mt7921_mcu_drv_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_drv_pmctrl);

int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt76_connac_pm *pm = &dev->pm;
	int err = 0;

	mutex_lock(&pm->mutex);

	if (mt76_connac_skip_fw_pmctrl(mphy, pm))
		goto out;

	err = __mt7921_mcu_fw_pmctrl(dev);
out:
	mutex_unlock(&pm->mutex);

	if (err)
		mt7921_reset(&dev->mt76);

	return err;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_fw_pmctrl);

int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
				 struct ieee80211_vif *vif,
				 bool enable)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	int err;

	if (enable) {
		err = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
		if (err)
			return err;

		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
		ieee80211_hw_set(hw, CONNECTION_MONITOR);
		mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);

		return 0;
	}

	err = mt7921_mcu_set_bss_pm(dev, vif, false);
	if (err)
		return err;

	vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
	__clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
	mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);

	return 0;
}

int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
{
	struct mt7921_txpwr_event *event;
	struct mt7921_txpwr_req req = {
		.dbdc_idx = 0,
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	event = (struct mt7921_txpwr_event *)skb->data;
	WARN_ON(skb->len != le16_to_cpu(event->len));
	memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));

	dev_kfree_skb(skb);

	return 0;
}

int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
			   bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct sniffer_enable_tlv {
			__le16 tag;
			__le16 len;
			u8 enable;
			u8 pad[3];
		} __packed enable;
	} req = {
		.hdr = {
			.band_idx = mvif->band_idx,
		},
		.enable = {
			.tag = cpu_to_le16(0),
			.len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
			.enable = enable,
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
				 true);
}