1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "coex.h" 7 #include "debug.h" 8 #include "fw.h" 9 #include "mac.h" 10 #include "phy.h" 11 #include "reg.h" 12 13 static struct sk_buff *rtw89_fw_h2c_alloc_skb(u32 len, bool header) 14 { 15 struct sk_buff *skb; 16 u32 header_len = 0; 17 18 if (header) 19 header_len = H2C_HEADER_LEN; 20 21 skb = dev_alloc_skb(len + header_len + 24); 22 if (!skb) 23 return NULL; 24 skb_reserve(skb, header_len + 24); 25 memset(skb->data, 0, len); 26 27 return skb; 28 } 29 30 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len) 31 { 32 return rtw89_fw_h2c_alloc_skb(len, true); 33 } 34 35 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len) 36 { 37 return rtw89_fw_h2c_alloc_skb(len, false); 38 } 39 40 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) 41 { 42 u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); 43 44 return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); 45 } 46 47 #define FWDL_WAIT_CNT 400000 48 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) 49 { 50 u8 val; 51 int ret; 52 53 ret = read_poll_timeout_atomic(_fw_get_rdy, val, 54 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 55 1, FWDL_WAIT_CNT, false, rtwdev); 56 if (ret) { 57 switch (val) { 58 case RTW89_FWDL_CHECKSUM_FAIL: 59 rtw89_err(rtwdev, "fw checksum fail\n"); 60 return -EINVAL; 61 62 case RTW89_FWDL_SECURITY_FAIL: 63 rtw89_err(rtwdev, "fw security fail\n"); 64 return -EINVAL; 65 66 case RTW89_FWDL_CV_NOT_MATCH: 67 rtw89_err(rtwdev, "fw cv not match\n"); 68 return -EINVAL; 69 70 default: 71 return -EBUSY; 72 } 73 } 74 75 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 76 77 return 0; 78 } 79 80 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 81 struct rtw89_fw_bin_info *info) 82 { 83 struct rtw89_fw_hdr_section_info *section_info; 84 const u8 *fw_end = fw + len; 85 const u8 *bin; 86 u32 i; 87 88 if (!info) 89 return -EINVAL; 90 91 info->section_num = GET_FW_HDR_SEC_NUM(fw); 92 info->hdr_len = RTW89_FW_HDR_SIZE + 93 info->section_num * RTW89_FW_SECTION_HDR_SIZE; 94 95 bin = fw + info->hdr_len; 96 97 /* jump to section header */ 98 fw += RTW89_FW_HDR_SIZE; 99 section_info = info->section_info; 100 for (i = 0; i < info->section_num; i++) { 101 section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw); 102 if (GET_FWSECTION_HDR_CHECKSUM(fw)) 103 section_info->len += FWDL_SECTION_CHKSUM_LEN; 104 section_info->redl = GET_FWSECTION_HDR_REDL(fw); 105 section_info->dladdr = 106 GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff; 107 section_info->addr = bin; 108 bin += section_info->len; 109 fw += RTW89_FW_SECTION_HDR_SIZE; 110 section_info++; 111 } 112 113 if (fw_end != bin) { 114 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 115 return -EINVAL; 116 } 117 118 return 0; 119 } 120 121 static 122 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 123 struct rtw89_fw_suit *fw_suit) 124 { 125 struct rtw89_fw_info *fw_info = &rtwdev->fw; 126 const u8 *mfw = fw_info->firmware->data; 127 u32 mfw_len = fw_info->firmware->size; 128 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 129 const struct rtw89_mfw_info *mfw_info; 130 int i; 131 132 if (mfw_hdr->sig != RTW89_MFW_SIG) { 133 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 134 /* legacy firmware support normal type only */ 135 if (type != RTW89_FW_NORMAL) 136 return -EINVAL; 137 fw_suit->data = mfw; 138 fw_suit->size = mfw_len; 139 return 0; 140 } 141 142 for (i = 0; i < mfw_hdr->fw_nr; i++) { 143 mfw_info = 
&mfw_hdr->info[i]; 144 if (mfw_info->cv != rtwdev->hal.cv || 145 mfw_info->type != type || 146 mfw_info->mp) 147 continue; 148 149 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 150 fw_suit->size = le32_to_cpu(mfw_info->size); 151 return 0; 152 } 153 154 rtw89_err(rtwdev, "no suitable firmware found\n"); 155 return -ENOENT; 156 } 157 158 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 159 enum rtw89_fw_type type, 160 struct rtw89_fw_suit *fw_suit) 161 { 162 const u8 *hdr = fw_suit->data; 163 164 fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr); 165 fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr); 166 fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr); 167 fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr); 168 fw_suit->build_year = GET_FW_HDR_YEAR(hdr); 169 fw_suit->build_mon = GET_FW_HDR_MONTH(hdr); 170 fw_suit->build_date = GET_FW_HDR_DATE(hdr); 171 fw_suit->build_hour = GET_FW_HDR_HOUR(hdr); 172 fw_suit->build_min = GET_FW_HDR_MIN(hdr); 173 fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr); 174 175 rtw89_info(rtwdev, 176 "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n", 177 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 178 fw_suit->sub_idex, fw_suit->cmd_ver, type); 179 } 180 181 static 182 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 183 { 184 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 185 int ret; 186 187 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit); 188 if (ret) 189 return ret; 190 191 rtw89_fw_update_ver(rtwdev, type, fw_suit); 192 193 return 0; 194 } 195 196 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 197 { 198 const struct rtw89_chip_info *chip = rtwdev->chip; 199 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 200 201 if (chip->chip_id == RTL8852A && 202 RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0)) 203 rtwdev->fw.old_ht_ra_format = true; 204 205 if (chip->chip_id == RTL8852A && 206 RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0)) 207 rtwdev->fw.scan_offload = true; 208 209 if (chip->chip_id == RTL8852A && 210 RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0)) 211 rtwdev->fw.tx_wake = true; 212 } 213 214 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 215 { 216 int ret; 217 218 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL); 219 if (ret) 220 return ret; 221 222 /* It still works if wowlan firmware isn't existing. */ 223 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN); 224 225 rtw89_fw_recognize_features(rtwdev); 226 227 return 0; 228 } 229 230 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 231 u8 type, u8 cat, u8 class, u8 func, 232 bool rack, bool dack, u32 len) 233 { 234 struct fwcmd_hdr *hdr; 235 236 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 237 238 if (!(rtwdev->fw.h2c_seq % 4)) 239 rack = true; 240 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 241 FIELD_PREP(H2C_HDR_CAT, cat) | 242 FIELD_PREP(H2C_HDR_CLASS, class) | 243 FIELD_PREP(H2C_HDR_FUNC, func) | 244 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 245 246 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 247 len + H2C_HEADER_LEN) | 248 (rack ? H2C_HDR_REC_ACK : 0) | 249 (dack ? 
H2C_HDR_DONE_ACK : 0)); 250 251 rtwdev->fw.h2c_seq++; 252 } 253 254 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 255 struct sk_buff *skb, 256 u8 type, u8 cat, u8 class, u8 func, 257 u32 len) 258 { 259 struct fwcmd_hdr *hdr; 260 261 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 262 263 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 264 FIELD_PREP(H2C_HDR_CAT, cat) | 265 FIELD_PREP(H2C_HDR_CLASS, class) | 266 FIELD_PREP(H2C_HDR_FUNC, func) | 267 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 268 269 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 270 len + H2C_HEADER_LEN)); 271 } 272 273 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 274 { 275 struct sk_buff *skb; 276 u32 ret = 0; 277 278 skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); 279 if (!skb) { 280 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 281 return -ENOMEM; 282 } 283 284 skb_put_data(skb, fw, len); 285 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); 286 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 287 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 288 H2C_FUNC_MAC_FWHDR_DL, len); 289 290 ret = rtw89_h2c_tx(rtwdev, skb, false); 291 if (ret) { 292 rtw89_err(rtwdev, "failed to send h2c\n"); 293 ret = -1; 294 goto fail; 295 } 296 297 return 0; 298 fail: 299 dev_kfree_skb_any(skb); 300 301 return ret; 302 } 303 304 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 305 { 306 u8 val; 307 int ret; 308 309 ret = __rtw89_fw_download_hdr(rtwdev, fw, len); 310 if (ret) { 311 rtw89_err(rtwdev, "[ERR]FW header download\n"); 312 return ret; 313 } 314 315 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, 316 1, FWDL_WAIT_CNT, false, 317 rtwdev, R_AX_WCPU_FW_CTRL); 318 if (ret) { 319 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 320 return ret; 321 } 322 323 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 324 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 325 326 return 0; 327 } 328 329 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 330 struct rtw89_fw_hdr_section_info *info) 331 { 332 struct sk_buff *skb; 333 const u8 *section = info->addr; 334 u32 residue_len = info->len; 335 u32 pkt_len; 336 int ret; 337 338 while (residue_len) { 339 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 340 pkt_len = FWDL_SECTION_PER_PKT_LEN; 341 else 342 pkt_len = residue_len; 343 344 skb = rtw89_fw_h2c_alloc_skb_no_hdr(pkt_len); 345 if (!skb) { 346 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 347 return -ENOMEM; 348 } 349 skb_put_data(skb, section, pkt_len); 350 351 ret = rtw89_h2c_tx(rtwdev, skb, true); 352 if (ret) { 353 rtw89_err(rtwdev, "failed to send h2c\n"); 354 ret = -1; 355 goto fail; 356 } 357 358 section += pkt_len; 359 residue_len -= pkt_len; 360 } 361 362 return 0; 363 fail: 364 dev_kfree_skb_any(skb); 365 366 return ret; 367 } 368 369 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, 370 struct rtw89_fw_bin_info *info) 371 { 372 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 373 u8 section_num = info->section_num; 374 int ret; 375 376 while (section_num--) { 377 ret = __rtw89_fw_download_main(rtwdev, section_info); 378 if (ret) 379 return ret; 380 section_info++; 381 } 382 383 mdelay(5); 384 385 ret = rtw89_fw_check_rdy(rtwdev); 386 if (ret) { 387 rtw89_warn(rtwdev, "download firmware fail\n"); 388 return ret; 389 } 390 391 return 0; 392 } 393 394 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 395 { 396 u32 val32; 397 u16 index; 
398 399 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 400 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 401 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 402 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 403 404 for (index = 0; index < 15; index++) { 405 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL); 406 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 407 fsleep(10); 408 } 409 } 410 411 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 412 { 413 u32 val32; 414 u16 val16; 415 416 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 417 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 418 419 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); 420 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); 421 422 rtw89_fw_prog_cnt_dump(rtwdev); 423 } 424 425 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 426 { 427 struct rtw89_fw_info *fw_info = &rtwdev->fw; 428 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 429 struct rtw89_fw_bin_info info; 430 const u8 *fw = fw_suit->data; 431 u32 len = fw_suit->size; 432 u8 val; 433 int ret; 434 435 if (!fw || !len) { 436 rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); 437 return -ENOENT; 438 } 439 440 ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); 441 if (ret) { 442 rtw89_err(rtwdev, "parse fw header fail\n"); 443 goto fwdl_err; 444 } 445 446 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, 447 1, FWDL_WAIT_CNT, false, 448 rtwdev, R_AX_WCPU_FW_CTRL); 449 if (ret) { 450 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 451 goto fwdl_err; 452 } 453 454 ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len); 455 if (ret) { 456 ret = -EBUSY; 457 goto fwdl_err; 458 } 459 460 ret = rtw89_fw_download_main(rtwdev, fw, &info); 461 if (ret) { 462 ret = -EBUSY; 463 goto fwdl_err; 464 } 465 466 fw_info->h2c_seq = 0; 467 fw_info->rec_seq = 0; 468 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 469 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 470 471 return ret; 472 473 fwdl_err: 474 rtw89_fw_dl_fail_dump(rtwdev); 475 return ret; 476 } 477 478 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 479 { 480 struct rtw89_fw_info *fw = &rtwdev->fw; 481 482 wait_for_completion(&fw->completion); 483 if (!fw->firmware) 484 return -EINVAL; 485 486 return 0; 487 } 488 489 static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context) 490 { 491 struct rtw89_fw_info *fw = context; 492 struct rtw89_dev *rtwdev = fw->rtwdev; 493 494 if (!firmware || !firmware->data) { 495 rtw89_err(rtwdev, "failed to request firmware\n"); 496 complete_all(&fw->completion); 497 return; 498 } 499 500 fw->firmware = firmware; 501 complete_all(&fw->completion); 502 } 503 504 int rtw89_load_firmware(struct rtw89_dev *rtwdev) 505 { 506 struct rtw89_fw_info *fw = &rtwdev->fw; 507 const char *fw_name = rtwdev->chip->fw_name; 508 int ret; 509 510 fw->rtwdev = rtwdev; 511 init_completion(&fw->completion); 512 513 ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, 514 GFP_KERNEL, fw, rtw89_load_firmware_cb); 515 if (ret) { 516 rtw89_err(rtwdev, "failed to async firmware request\n"); 517 return ret; 518 } 519 520 return 0; 521 } 522 523 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 524 { 525 struct rtw89_fw_info *fw = &rtwdev->fw; 526 527 rtw89_wait_firmware_completion(rtwdev); 528 529 if (fw->firmware) 530 release_firmware(fw->firmware); 531 } 532 533 #define H2C_CAM_LEN 60 534 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif 
*rtwvif, 535 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 536 { 537 struct sk_buff *skb; 538 539 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CAM_LEN); 540 if (!skb) { 541 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 542 return -ENOMEM; 543 } 544 skb_put(skb, H2C_CAM_LEN); 545 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 546 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data); 547 548 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 549 H2C_CAT_MAC, 550 H2C_CL_MAC_ADDR_CAM_UPDATE, 551 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 552 H2C_CAM_LEN); 553 554 if (rtw89_h2c_tx(rtwdev, skb, false)) { 555 rtw89_err(rtwdev, "failed to send h2c\n"); 556 goto fail; 557 } 558 559 return 0; 560 fail: 561 dev_kfree_skb_any(skb); 562 563 return -EBUSY; 564 } 565 566 #define H2C_BA_CAM_LEN 8 567 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 568 bool valid, struct ieee80211_ampdu_params *params) 569 { 570 u8 macid = rtwsta->mac_id; 571 struct sk_buff *skb; 572 u8 entry_idx; 573 int ret; 574 575 ret = valid ? 576 rtw89_core_acquire_sta_ba_entry(rtwsta, params->tid, &entry_idx) : 577 rtw89_core_release_sta_ba_entry(rtwsta, params->tid, &entry_idx); 578 if (ret) { 579 /* it still works even if we don't have static BA CAM, because 580 * hardware can create dynamic BA CAM automatically. 581 */ 582 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 583 "failed to %s entry tid=%d for h2c ba cam\n", 584 valid ? "alloc" : "free", params->tid); 585 return 0; 586 } 587 588 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN); 589 if (!skb) { 590 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 591 return -ENOMEM; 592 } 593 skb_put(skb, H2C_BA_CAM_LEN); 594 SET_BA_CAM_MACID(skb->data, macid); 595 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx); 596 if (!valid) 597 goto end; 598 SET_BA_CAM_VALID(skb->data, valid); 599 SET_BA_CAM_TID(skb->data, params->tid); 600 if (params->buf_size > 64) 601 SET_BA_CAM_BMAP_SIZE(skb->data, 4); 602 else 603 SET_BA_CAM_BMAP_SIZE(skb->data, 0); 604 /* If init req is set, hw will set the ssn */ 605 SET_BA_CAM_INIT_REQ(skb->data, 1); 606 SET_BA_CAM_SSN(skb->data, params->ssn); 607 608 end: 609 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 610 H2C_CAT_MAC, 611 H2C_CL_BA_CAM, 612 H2C_FUNC_MAC_BA_CAM, 0, 1, 613 H2C_BA_CAM_LEN); 614 615 if (rtw89_h2c_tx(rtwdev, skb, false)) { 616 rtw89_err(rtwdev, "failed to send h2c\n"); 617 goto fail; 618 } 619 620 return 0; 621 fail: 622 dev_kfree_skb_any(skb); 623 624 return -EBUSY; 625 } 626 627 #define H2C_LOG_CFG_LEN 12 628 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 629 { 630 struct sk_buff *skb; 631 u32 comp = enable ? 
BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 632 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; 633 634 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LOG_CFG_LEN); 635 if (!skb) { 636 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 637 return -ENOMEM; 638 } 639 640 skb_put(skb, H2C_LOG_CFG_LEN); 641 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); 642 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 643 SET_LOG_CFG_COMP(skb->data, comp); 644 SET_LOG_CFG_COMP_EXT(skb->data, 0); 645 646 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 647 H2C_CAT_MAC, 648 H2C_CL_FW_INFO, 649 H2C_FUNC_LOG_CFG, 0, 0, 650 H2C_LOG_CFG_LEN); 651 652 if (rtw89_h2c_tx(rtwdev, skb, false)) { 653 rtw89_err(rtwdev, "failed to send h2c\n"); 654 goto fail; 655 } 656 657 return 0; 658 fail: 659 dev_kfree_skb_any(skb); 660 661 return -EBUSY; 662 } 663 664 #define H2C_GENERAL_PKT_LEN 6 665 #define H2C_GENERAL_PKT_ID_UND 0xff 666 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid) 667 { 668 struct sk_buff *skb; 669 670 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_GENERAL_PKT_LEN); 671 if (!skb) { 672 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 673 return -ENOMEM; 674 } 675 skb_put(skb, H2C_GENERAL_PKT_LEN); 676 SET_GENERAL_PKT_MACID(skb->data, macid); 677 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 678 SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 679 SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 680 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 681 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 682 683 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 684 H2C_CAT_MAC, 685 H2C_CL_FW_INFO, 686 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 687 H2C_GENERAL_PKT_LEN); 688 689 if (rtw89_h2c_tx(rtwdev, skb, false)) { 690 rtw89_err(rtwdev, "failed to send h2c\n"); 691 goto fail; 692 } 693 694 return 0; 695 fail: 696 dev_kfree_skb_any(skb); 697 698 return -EBUSY; 699 } 700 701 #define H2C_LPS_PARM_LEN 8 702 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 703 struct rtw89_lps_parm *lps_param) 704 { 705 struct sk_buff *skb; 706 707 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LPS_PARM_LEN); 708 if (!skb) { 709 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 710 return -ENOMEM; 711 } 712 skb_put(skb, H2C_LPS_PARM_LEN); 713 714 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 715 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 716 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 717 SET_LPS_PARM_RLBM(skb->data, 1); 718 SET_LPS_PARM_SMARTPS(skb->data, 1); 719 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 720 SET_LPS_PARM_VOUAPSD(skb->data, 0); 721 SET_LPS_PARM_VIUAPSD(skb->data, 0); 722 SET_LPS_PARM_BEUAPSD(skb->data, 0); 723 SET_LPS_PARM_BKUAPSD(skb->data, 0); 724 725 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 726 H2C_CAT_MAC, 727 H2C_CL_MAC_PS, 728 H2C_FUNC_MAC_LPS_PARM, 0, 1, 729 H2C_LPS_PARM_LEN); 730 731 if (rtw89_h2c_tx(rtwdev, skb, false)) { 732 rtw89_err(rtwdev, "failed to send h2c\n"); 733 goto fail; 734 } 735 736 return 0; 737 fail: 738 dev_kfree_skb_any(skb); 739 740 return -EBUSY; 741 } 742 743 #define H2C_CMC_TBL_LEN 68 744 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 745 struct rtw89_vif *rtwvif) 746 { 747 struct rtw89_hal *hal = &rtwdev->hal; 748 struct sk_buff *skb; 749 u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 750 u8 map_b = hal->antenna_tx == RF_AB ? 
1 : 0; 751 u8 macid = rtwvif->mac_id; 752 753 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); 754 if (!skb) { 755 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 756 return -ENOMEM; 757 } 758 skb_put(skb, H2C_CMC_TBL_LEN); 759 SET_CTRL_INFO_MACID(skb->data, macid); 760 SET_CTRL_INFO_OPERATION(skb->data, 1); 761 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 762 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 763 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 764 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 765 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 766 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 767 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 768 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 769 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 770 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 771 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 772 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 773 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 774 SET_CMC_TBL_DATA_DCM(skb->data, 0); 775 776 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 777 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 778 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 779 H2C_CMC_TBL_LEN); 780 781 if (rtw89_h2c_tx(rtwdev, skb, false)) { 782 rtw89_err(rtwdev, "failed to send h2c\n"); 783 goto fail; 784 } 785 786 return 0; 787 fail: 788 dev_kfree_skb_any(skb); 789 790 return -EBUSY; 791 } 792 793 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 794 struct ieee80211_sta *sta, u8 *pads) 795 { 796 bool ppe_th; 797 u8 ppe16, ppe8; 798 u8 nss = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1; 799 u8 ppe_thres_hdr = sta->he_cap.ppe_thres[0]; 800 u8 ru_bitmap; 801 u8 n, idx, sh; 802 u16 ppe; 803 int i; 804 805 if (!sta->he_cap.has_he) 806 return; 807 808 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 809 sta->he_cap.he_cap_elem.phy_cap_info[6]); 810 if (!ppe_th) { 811 u8 pad; 812 813 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 814 sta->he_cap.he_cap_elem.phy_cap_info[9]); 815 816 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 817 pads[i] = pad; 818 } 819 820 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 821 n = hweight8(ru_bitmap); 822 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 823 824 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 825 if (!(ru_bitmap & BIT(i))) { 826 pads[i] = 1; 827 continue; 828 } 829 830 idx = n >> 3; 831 sh = n & 7; 832 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 833 834 ppe = le16_to_cpu(*((__le16 *)&sta->he_cap.ppe_thres[idx])); 835 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 836 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 837 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 838 839 if (ppe16 != 7 && ppe8 == 7) 840 pads[i] = 2; 841 else if (ppe8 != 7) 842 pads[i] = 1; 843 else 844 pads[i] = 0; 845 } 846 } 847 848 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 849 struct ieee80211_vif *vif, 850 struct ieee80211_sta *sta) 851 { 852 struct rtw89_hal *hal = &rtwdev->hal; 853 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 854 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 855 struct sk_buff *skb; 856 u8 pads[RTW89_PPE_BW_NUM]; 857 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 858 859 memset(pads, 0, sizeof(pads)); 860 if (sta) 861 __get_sta_he_pkt_padding(rtwdev, sta, pads); 862 863 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); 864 if (!skb) { 865 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 866 return -ENOMEM; 867 } 868 skb_put(skb, H2C_CMC_TBL_LEN); 869 SET_CTRL_INFO_MACID(skb->data, mac_id); 870 SET_CTRL_INFO_OPERATION(skb->data, 1); 871 SET_CMC_TBL_DISRTSFB(skb->data, 1); 872 SET_CMC_TBL_DISDATAFB(skb->data, 1); 873 if (hal->current_band_type == RTW89_BAND_2G) 874 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1); 875 else 876 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6); 877 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 878 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 879 if (vif->type == NL80211_IFTYPE_STATION) 880 SET_CMC_TBL_ULDL(skb->data, 1); 881 else 882 SET_CMC_TBL_ULDL(skb->data, 0); 883 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 884 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 885 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 886 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 887 if (sta) 888 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he); 889 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 890 SET_CMC_TBL_DATA_DCM(skb->data, 0); 891 892 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 893 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 894 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 895 H2C_CMC_TBL_LEN); 896 897 if (rtw89_h2c_tx(rtwdev, skb, false)) { 898 rtw89_err(rtwdev, "failed to send h2c\n"); 899 goto fail; 900 } 901 902 return 0; 903 fail: 904 dev_kfree_skb_any(skb); 905 906 return -EBUSY; 907 } 908 909 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 910 struct rtw89_sta *rtwsta) 911 { 912 struct sk_buff *skb; 913 914 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); 915 if (!skb) { 916 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 917 return -ENOMEM; 918 } 919 skb_put(skb, H2C_CMC_TBL_LEN); 920 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 921 SET_CTRL_INFO_OPERATION(skb->data, 1); 922 if (rtwsta->cctl_tx_time) { 923 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 924 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 925 } 926 if (rtwsta->cctl_tx_retry_limit) { 927 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 928 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 929 } 930 931 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 932 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 933 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 934 H2C_CMC_TBL_LEN); 935 936 if (rtw89_h2c_tx(rtwdev, skb, false)) { 937 rtw89_err(rtwdev, "failed to send h2c\n"); 938 goto fail; 939 } 940 941 return 0; 942 fail: 943 dev_kfree_skb_any(skb); 944 945 return -EBUSY; 946 } 947 948 #define H2C_BCN_BASE_LEN 12 949 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 950 struct rtw89_vif *rtwvif) 951 { 952 struct rtw89_hal *hal = &rtwdev->hal; 953 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 954 struct sk_buff *skb; 955 struct sk_buff *skb_beacon; 956 u16 tim_offset; 957 int bcn_total_len; 958 959 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, NULL); 960 if (!skb_beacon) { 961 rtw89_err(rtwdev, "failed to get beacon skb\n"); 962 return -ENOMEM; 963 } 964 965 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 966 skb = rtw89_fw_h2c_alloc_skb_with_hdr(bcn_total_len); 967 if (!skb) { 968 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 969 
dev_kfree_skb_any(skb_beacon); 970 return -ENOMEM; 971 } 972 skb_put(skb, H2C_BCN_BASE_LEN); 973 974 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 975 SET_BCN_UPD_MBSSID(skb->data, 0); 976 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 977 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 978 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 979 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 980 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 981 SET_BCN_UPD_RATE(skb->data, hal->current_band_type == RTW89_BAND_2G ? 982 RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6); 983 984 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 985 dev_kfree_skb_any(skb_beacon); 986 987 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 988 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 989 H2C_FUNC_MAC_BCN_UPD, 0, 1, 990 bcn_total_len); 991 992 if (rtw89_h2c_tx(rtwdev, skb, false)) { 993 rtw89_err(rtwdev, "failed to send h2c\n"); 994 dev_kfree_skb_any(skb); 995 return -EBUSY; 996 } 997 998 return 0; 999 } 1000 1001 #define H2C_ROLE_MAINTAIN_LEN 4 1002 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 1003 struct rtw89_vif *rtwvif, 1004 struct rtw89_sta *rtwsta, 1005 enum rtw89_upd_mode upd_mode) 1006 { 1007 struct sk_buff *skb; 1008 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1009 u8 self_role; 1010 1011 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 1012 if (rtwsta) 1013 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1014 else 1015 self_role = rtwvif->self_role; 1016 } else { 1017 self_role = rtwvif->self_role; 1018 } 1019 1020 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_ROLE_MAINTAIN_LEN); 1021 if (!skb) { 1022 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1023 return -ENOMEM; 1024 } 1025 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 1026 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 1027 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 1028 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 1029 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1030 1031 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1032 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1033 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 1034 H2C_ROLE_MAINTAIN_LEN); 1035 1036 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1037 rtw89_err(rtwdev, "failed to send h2c\n"); 1038 goto fail; 1039 } 1040 1041 return 0; 1042 fail: 1043 dev_kfree_skb_any(skb); 1044 1045 return -EBUSY; 1046 } 1047 1048 #define H2C_JOIN_INFO_LEN 4 1049 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1050 struct rtw89_sta *rtwsta, bool dis_conn) 1051 { 1052 struct sk_buff *skb; 1053 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1054 u8 self_role = rtwvif->self_role; 1055 u8 net_type = rtwvif->net_type; 1056 1057 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 1058 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1059 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 1060 } 1061 1062 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN); 1063 if (!skb) { 1064 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1065 return -ENOMEM; 1066 } 1067 skb_put(skb, H2C_JOIN_INFO_LEN); 1068 SET_JOININFO_MACID(skb->data, mac_id); 1069 SET_JOININFO_OP(skb->data, dis_conn); 1070 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); 1071 SET_JOININFO_WMM(skb->data, rtwvif->wmm); 1072 SET_JOININFO_TGR(skb->data, rtwvif->trigger); 1073 SET_JOININFO_ISHESTA(skb->data, 0); 1074 SET_JOININFO_DLBW(skb->data, 0); 1075 SET_JOININFO_TF_MAC_PAD(skb->data, 0); 1076 SET_JOININFO_DL_T_PE(skb->data, 0); 1077 SET_JOININFO_PORT_ID(skb->data, rtwvif->port); 1078 SET_JOININFO_NET_TYPE(skb->data, net_type); 1079 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1080 SET_JOININFO_SELF_ROLE(skb->data, self_role); 1081 1082 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1083 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1084 H2C_FUNC_MAC_JOININFO, 0, 1, 1085 H2C_JOIN_INFO_LEN); 1086 1087 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1088 rtw89_err(rtwdev, "failed to send h2c\n"); 1089 goto fail; 1090 } 1091 1092 return 0; 1093 fail: 1094 dev_kfree_skb_any(skb); 1095 1096 return -EBUSY; 1097 } 1098 1099 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 1100 bool pause) 1101 { 1102 struct rtw89_fw_macid_pause_grp h2c = {{0}}; 1103 u8 len = sizeof(struct rtw89_fw_macid_pause_grp); 1104 struct sk_buff *skb; 1105 1106 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN); 1107 if (!skb) { 1108 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1109 return -ENOMEM; 1110 } 1111 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); 1112 if (pause) 1113 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); 1114 skb_put_data(skb, &h2c, len); 1115 1116 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1117 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1118 H2C_FUNC_MAC_MACID_PAUSE, 1, 0, 1119 len); 1120 1121 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1122 rtw89_err(rtwdev, "failed to send h2c\n"); 1123 goto fail; 1124 } 1125 1126 return 0; 1127 fail: 1128 dev_kfree_skb_any(skb); 1129 1130 return -EBUSY; 1131 } 1132 1133 #define H2C_EDCA_LEN 12 1134 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1135 u8 ac, u32 val) 1136 { 1137 struct sk_buff *skb; 1138 1139 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_EDCA_LEN); 1140 if (!skb) { 1141 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 1142 return -ENOMEM; 1143 } 1144 skb_put(skb, H2C_EDCA_LEN); 1145 RTW89_SET_EDCA_SEL(skb->data, 0); 1146 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 1147 RTW89_SET_EDCA_WMM(skb->data, 0); 1148 RTW89_SET_EDCA_AC(skb->data, ac); 1149 RTW89_SET_EDCA_PARAM(skb->data, val); 1150 1151 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1152 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1153 H2C_FUNC_USR_EDCA, 0, 1, 1154 H2C_EDCA_LEN); 1155 1156 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1157 rtw89_err(rtwdev, "failed to send h2c\n"); 1158 goto fail; 1159 } 1160 1161 return 0; 1162 fail: 1163 dev_kfree_skb_any(skb); 1164 1165 return -EBUSY; 1166 } 1167 1168 #define H2C_OFLD_CFG_LEN 8 1169 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 1170 { 1171 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 1172 struct sk_buff *skb; 1173 1174 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_OFLD_CFG_LEN); 1175 if (!skb) { 1176 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 1177 return -ENOMEM; 1178 } 1179 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 1180 1181 
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1182 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1183 H2C_FUNC_OFLD_CFG, 0, 1, 1184 H2C_OFLD_CFG_LEN); 1185 1186 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1187 rtw89_err(rtwdev, "failed to send h2c\n"); 1188 goto fail; 1189 } 1190 1191 return 0; 1192 fail: 1193 dev_kfree_skb_any(skb); 1194 1195 return -EBUSY; 1196 } 1197 1198 #define H2C_RA_LEN 16 1199 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 1200 { 1201 struct sk_buff *skb; 1202 u8 *cmd; 1203 1204 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_RA_LEN); 1205 if (!skb) { 1206 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1207 return -ENOMEM; 1208 } 1209 skb_put(skb, H2C_RA_LEN); 1210 cmd = skb->data; 1211 rtw89_debug(rtwdev, RTW89_DBG_RA, 1212 "ra cmd msk: %llx ", ra->ra_mask); 1213 1214 RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); 1215 RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); 1216 RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); 1217 RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); 1218 RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); 1219 RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); 1220 RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); 1221 RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); 1222 RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap); 1223 RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); 1224 RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); 1225 RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); 1226 RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); 1227 RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); 1228 RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); 1229 RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); 1230 RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); 1231 RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); 1232 RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); 1233 1234 if (csi) { 1235 RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); 1236 RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); 1237 RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); 1238 RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); 1239 RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); 1240 RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); 1241 RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); 1242 RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); 1243 RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); 1244 } 1245 1246 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1247 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 1248 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 1249 H2C_RA_LEN); 1250 1251 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1252 rtw89_err(rtwdev, "failed to send h2c\n"); 1253 goto fail; 1254 } 1255 1256 return 0; 1257 fail: 1258 dev_kfree_skb_any(skb); 1259 1260 return -EBUSY; 1261 } 1262 1263 #define H2C_LEN_CXDRVHDR 2 1264 #define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR) 1265 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 1266 { 1267 struct rtw89_btc *btc = &rtwdev->btc; 1268 struct rtw89_btc_dm *dm = &btc->dm; 1269 struct rtw89_btc_init_info *init_info = &dm->init_info; 1270 struct rtw89_btc_module *module = &init_info->module; 1271 struct rtw89_btc_ant_info *ant = &module->ant; 1272 struct sk_buff *skb; 1273 u8 *cmd; 1274 1275 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_INIT); 1276 if (!skb) { 1277 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 1278 return -ENOMEM; 1279 } 1280 skb_put(skb, H2C_LEN_CXDRVINFO_INIT); 1281 cmd = skb->data; 1282 
1283 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT); 1284 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR); 1285 1286 RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type); 1287 RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num); 1288 RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation); 1289 RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos); 1290 RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity); 1291 1292 RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type); 1293 RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv); 1294 RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo); 1295 RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos); 1296 RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type); 1297 1298 RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch); 1299 RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only); 1300 RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok); 1301 RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en); 1302 RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other); 1303 RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only); 1304 1305 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1306 H2C_CAT_OUTSRC, BTFC_SET, 1307 SET_DRV_INFO, 0, 0, 1308 H2C_LEN_CXDRVINFO_INIT); 1309 1310 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1311 rtw89_err(rtwdev, "failed to send h2c\n"); 1312 goto fail; 1313 } 1314 1315 return 0; 1316 fail: 1317 dev_kfree_skb_any(skb); 1318 1319 return -EBUSY; 1320 } 1321 1322 #define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR) 1323 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 1324 { 1325 struct rtw89_btc *btc = &rtwdev->btc; 1326 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 1327 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 1328 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 1329 struct rtw89_btc_wl_active_role *active = role_info->active_role; 1330 struct sk_buff *skb; 1331 u8 *cmd; 1332 int i; 1333 1334 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_ROLE); 1335 if (!skb) { 1336 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 1337 return -ENOMEM; 1338 } 1339 skb_put(skb, H2C_LEN_CXDRVINFO_ROLE); 1340 cmd = skb->data; 1341 1342 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 1343 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR); 1344 1345 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 1346 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 1347 1348 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 1349 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 1350 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 1351 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 1352 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 1353 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 1354 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 1355 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 1356 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 1357 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 1358 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 1359 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 1360 1361 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 1362 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i); 1363 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i); 1364 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i); 1365 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i); 1366 
RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i); 1367 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i); 1368 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i); 1369 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i); 1370 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i); 1371 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i); 1372 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i); 1373 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i); 1374 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i); 1375 } 1376 1377 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1378 H2C_CAT_OUTSRC, BTFC_SET, 1379 SET_DRV_INFO, 0, 0, 1380 H2C_LEN_CXDRVINFO_ROLE); 1381 1382 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1383 rtw89_err(rtwdev, "failed to send h2c\n"); 1384 goto fail; 1385 } 1386 1387 return 0; 1388 fail: 1389 dev_kfree_skb_any(skb); 1390 1391 return -EBUSY; 1392 } 1393 1394 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 1395 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 1396 { 1397 struct rtw89_btc *btc = &rtwdev->btc; 1398 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 1399 struct sk_buff *skb; 1400 u8 *cmd; 1401 1402 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_CTRL); 1403 if (!skb) { 1404 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 1405 return -ENOMEM; 1406 } 1407 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 1408 cmd = skb->data; 1409 1410 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 1411 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 1412 1413 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 1414 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 1415 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 1416 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 1417 1418 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1419 H2C_CAT_OUTSRC, BTFC_SET, 1420 SET_DRV_INFO, 0, 0, 1421 H2C_LEN_CXDRVINFO_CTRL); 1422 1423 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1424 rtw89_err(rtwdev, "failed to send h2c\n"); 1425 goto fail; 1426 } 1427 1428 return 0; 1429 fail: 1430 dev_kfree_skb_any(skb); 1431 1432 return -EBUSY; 1433 } 1434 1435 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 1436 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) 1437 { 1438 struct rtw89_btc *btc = &rtwdev->btc; 1439 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 1440 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 1441 struct sk_buff *skb; 1442 u8 *cmd; 1443 1444 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_RFK); 1445 if (!skb) { 1446 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 1447 return -ENOMEM; 1448 } 1449 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 1450 cmd = skb->data; 1451 1452 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); 1453 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 1454 1455 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 1456 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 1457 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 1458 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 1459 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 1460 1461 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1462 H2C_CAT_OUTSRC, BTFC_SET, 1463 SET_DRV_INFO, 0, 0, 1464 H2C_LEN_CXDRVINFO_RFK); 1465 1466 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1467 rtw89_err(rtwdev, "failed to send h2c\n"); 1468 goto fail; 1469 } 1470 1471 return 0; 1472 fail: 1473 dev_kfree_skb_any(skb); 1474 1475 return -EBUSY; 1476 } 1477 1478 
#define H2C_LEN_PKT_OFLD 4 1479 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 1480 { 1481 struct sk_buff *skb; 1482 u8 *cmd; 1483 1484 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD); 1485 if (!skb) { 1486 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 1487 return -ENOMEM; 1488 } 1489 skb_put(skb, H2C_LEN_PKT_OFLD); 1490 cmd = skb->data; 1491 1492 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 1493 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 1494 1495 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1496 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1497 H2C_FUNC_PACKET_OFLD, 1, 1, 1498 H2C_LEN_PKT_OFLD); 1499 1500 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1501 rtw89_err(rtwdev, "failed to send h2c\n"); 1502 goto fail; 1503 } 1504 1505 return 0; 1506 fail: 1507 dev_kfree_skb_any(skb); 1508 1509 return -EBUSY; 1510 } 1511 1512 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 1513 struct sk_buff *skb_ofld) 1514 { 1515 struct sk_buff *skb; 1516 u8 *cmd; 1517 u8 alloc_id; 1518 1519 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 1520 RTW89_MAX_PKT_OFLD_NUM); 1521 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 1522 return -ENOSPC; 1523 1524 *id = alloc_id; 1525 1526 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD + skb_ofld->len); 1527 if (!skb) { 1528 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 1529 return -ENOMEM; 1530 } 1531 skb_put(skb, H2C_LEN_PKT_OFLD); 1532 cmd = skb->data; 1533 1534 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 1535 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 1536 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 1537 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 1538 1539 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1540 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1541 H2C_FUNC_PACKET_OFLD, 1, 1, 1542 H2C_LEN_PKT_OFLD + skb_ofld->len); 1543 1544 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1545 rtw89_err(rtwdev, "failed to send h2c\n"); 1546 goto fail; 1547 } 1548 1549 return 0; 1550 fail: 1551 dev_kfree_skb_any(skb); 1552 1553 return -EBUSY; 1554 } 1555 1556 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 1557 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 1558 struct list_head *chan_list) 1559 { 1560 struct rtw89_mac_chinfo *ch_info; 1561 struct sk_buff *skb; 1562 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 1563 u8 *cmd; 1564 1565 skb = rtw89_fw_h2c_alloc_skb_with_hdr(skb_len); 1566 if (!skb) { 1567 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 1568 return -ENOMEM; 1569 } 1570 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 1571 cmd = skb->data; 1572 1573 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 1574 /* in unit of 4 bytes */ 1575 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 1576 1577 list_for_each_entry(ch_info, chan_list, list) { 1578 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 1579 1580 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 1581 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 1582 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 1583 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 1584 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 1585 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 1586 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 1587 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 1588 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 1589 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 1590 
RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 1591 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 1592 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 1593 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 1594 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 1595 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 1596 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 1597 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 1598 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 1599 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 1600 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 1601 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 1602 } 1603 1604 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1605 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1606 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 1607 1608 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1609 rtw89_err(rtwdev, "failed to send h2c\n"); 1610 goto fail; 1611 } 1612 1613 return 0; 1614 fail: 1615 dev_kfree_skb_any(skb); 1616 1617 return -EBUSY; 1618 } 1619 1620 #define H2C_LEN_SCAN_OFFLOAD 20 1621 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 1622 struct rtw89_scan_option *option, 1623 struct rtw89_vif *rtwvif) 1624 { 1625 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 1626 struct sk_buff *skb; 1627 u8 *cmd; 1628 1629 skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_SCAN_OFFLOAD); 1630 if (!skb) { 1631 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 1632 return -ENOMEM; 1633 } 1634 skb_put(skb, H2C_LEN_SCAN_OFFLOAD); 1635 cmd = skb->data; 1636 1637 RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id); 1638 RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port); 1639 RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0); 1640 RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable); 1641 RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true); 1642 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode); 1643 RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE); 1644 RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE); 1645 if (option->target_ch_mode) { 1646 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw); 1647 RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd, 1648 scan_info->op_pri_ch); 1649 RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd, 1650 scan_info->op_chan); 1651 } 1652 1653 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1654 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1655 H2C_FUNC_SCANOFLD, 1, 1, 1656 H2C_LEN_SCAN_OFFLOAD); 1657 1658 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1659 rtw89_err(rtwdev, "failed to send h2c\n"); 1660 goto fail; 1661 } 1662 1663 return 0; 1664 fail: 1665 dev_kfree_skb_any(skb); 1666 1667 return -EBUSY; 1668 } 1669 1670 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 1671 struct rtw89_fw_h2c_rf_reg_info *info, 1672 u16 len, u8 page) 1673 { 1674 struct sk_buff *skb; 1675 u8 class = info->rf_path == RF_PATH_A ? 
1676 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 1677 1678 skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); 1679 if (!skb) { 1680 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 1681 return -ENOMEM; 1682 } 1683 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 1684 1685 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1686 H2C_CAT_OUTSRC, class, page, 0, 0, 1687 len); 1688 1689 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1690 rtw89_err(rtwdev, "failed to send h2c\n"); 1691 goto fail; 1692 } 1693 1694 return 0; 1695 fail: 1696 dev_kfree_skb_any(skb); 1697 1698 return -EBUSY; 1699 } 1700 1701 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 1702 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 1703 bool rack, bool dack) 1704 { 1705 struct sk_buff *skb; 1706 1707 skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); 1708 if (!skb) { 1709 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 1710 return -ENOMEM; 1711 } 1712 skb_put_data(skb, buf, len); 1713 1714 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1715 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 1716 len); 1717 1718 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1719 rtw89_err(rtwdev, "failed to send h2c\n"); 1720 goto fail; 1721 } 1722 1723 return 0; 1724 fail: 1725 dev_kfree_skb_any(skb); 1726 1727 return -EBUSY; 1728 } 1729 1730 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 1731 { 1732 struct sk_buff *skb; 1733 1734 skb = rtw89_fw_h2c_alloc_skb_no_hdr(len); 1735 if (!skb) { 1736 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 1737 return -ENOMEM; 1738 } 1739 skb_put_data(skb, buf, len); 1740 1741 if (rtw89_h2c_tx(rtwdev, skb, false)) { 1742 rtw89_err(rtwdev, "failed to send h2c\n"); 1743 goto fail; 1744 } 1745 1746 return 0; 1747 fail: 1748 dev_kfree_skb_any(skb); 1749 1750 return -EBUSY; 1751 } 1752 1753 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 1754 { 1755 struct rtw89_early_h2c *early_h2c; 1756 1757 lockdep_assert_held(&rtwdev->mutex); 1758 1759 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 1760 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 1761 } 1762 } 1763 1764 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 1765 { 1766 struct rtw89_early_h2c *early_h2c, *tmp; 1767 1768 mutex_lock(&rtwdev->mutex); 1769 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 1770 list_del(&early_h2c->list); 1771 kfree(early_h2c->h2c); 1772 kfree(early_h2c); 1773 } 1774 mutex_unlock(&rtwdev->mutex); 1775 } 1776 1777 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 1778 { 1779 skb_queue_tail(&rtwdev->c2h_queue, c2h); 1780 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 1781 } 1782 1783 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 1784 struct sk_buff *skb) 1785 { 1786 u8 category = RTW89_GET_C2H_CATEGORY(skb->data); 1787 u8 class = RTW89_GET_C2H_CLASS(skb->data); 1788 u8 func = RTW89_GET_C2H_FUNC(skb->data); 1789 u16 len = RTW89_GET_C2H_LEN(skb->data); 1790 bool dump = true; 1791 1792 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 1793 return; 1794 1795 switch (category) { 1796 case RTW89_C2H_CAT_TEST: 1797 break; 1798 case RTW89_C2H_CAT_MAC: 1799 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 1800 if (class == RTW89_MAC_C2H_CLASS_INFO && 1801 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 1802 dump = false; 1803 break; 1804 case RTW89_C2H_CAT_OUTSRC: 1805 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 1806 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 1807 
rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 1808 else 1809 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 1810 break; 1811 } 1812 1813 if (dump) 1814 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 1815 } 1816 1817 void rtw89_fw_c2h_work(struct work_struct *work) 1818 { 1819 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 1820 c2h_work); 1821 struct sk_buff *skb, *tmp; 1822 1823 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 1824 skb_unlink(skb, &rtwdev->c2h_queue); 1825 mutex_lock(&rtwdev->mutex); 1826 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 1827 mutex_unlock(&rtwdev->mutex); 1828 dev_kfree_skb_any(skb); 1829 } 1830 } 1831 1832 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 1833 struct rtw89_mac_h2c_info *info) 1834 { 1835 const struct rtw89_chip_info *chip = rtwdev->chip; 1836 const u32 *h2c_reg = chip->h2c_regs; 1837 u8 i, val, len; 1838 int ret; 1839 1840 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 1841 rtwdev, chip->h2c_ctrl_reg); 1842 if (ret) { 1843 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 1844 return ret; 1845 } 1846 1847 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 1848 sizeof(info->h2creg[0])); 1849 1850 RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id); 1851 RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len); 1852 for (i = 0; i < RTW89_H2CREG_MAX; i++) 1853 rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]); 1854 1855 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 1856 1857 return 0; 1858 } 1859 1860 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 1861 struct rtw89_mac_c2h_info *info) 1862 { 1863 const struct rtw89_chip_info *chip = rtwdev->chip; 1864 const u32 *c2h_reg = chip->c2h_regs; 1865 u32 ret; 1866 u8 i, val; 1867 1868 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 1869 1870 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 1871 RTW89_C2H_TIMEOUT, false, rtwdev, 1872 chip->c2h_ctrl_reg); 1873 if (ret) { 1874 rtw89_warn(rtwdev, "c2h reg timeout\n"); 1875 return ret; 1876 } 1877 1878 for (i = 0; i < RTW89_C2HREG_MAX; i++) 1879 info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 1880 1881 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 1882 1883 info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg); 1884 info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) - 1885 RTW89_C2HREG_HDR_LEN; 1886 1887 return 0; 1888 } 1889 1890 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 1891 struct rtw89_mac_h2c_info *h2c_info, 1892 struct rtw89_mac_c2h_info *c2h_info) 1893 { 1894 u32 ret; 1895 1896 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 1897 lockdep_assert_held(&rtwdev->mutex); 1898 1899 if (!h2c_info && !c2h_info) 1900 return -EINVAL; 1901 1902 if (!h2c_info) 1903 goto recv_c2h; 1904 1905 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 1906 if (ret) 1907 return ret; 1908 1909 recv_c2h: 1910 if (!c2h_info) 1911 return 0; 1912 1913 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 1914 if (ret) 1915 return ret; 1916 1917 return 0; 1918 } 1919 1920 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 1921 { 1922 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 1923 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 1924 return; 1925 } 1926 1927 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 1928 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 1929 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 1930 rtw89_info(rtwdev, "FW MISC = 0x%x\n", 
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
{
	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
		rtw89_err(rtwdev, "[ERR]pwr is off\n");
		return;
	}

	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
{
	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
	struct rtw89_pktofld_info *info, *tmp;
	u8 idx;

	for (idx = RTW89_BAND_2G; idx < NUM_NL80211_BANDS; idx++) {
		if (!(rtwdev->chip->support_bands & BIT(idx)))
			continue;

		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
			rtw89_core_release_bit_map(rtwdev->pkt_offload,
						   info->id);
			list_del(&info->list);
			kfree(info);
		}
	}
}

static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
				     struct rtw89_vif *rtwvif,
				     struct sk_buff *skb)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct rtw89_pktofld_info *info;
	struct sk_buff *new;
	int ret = 0;
	u8 band;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!(rtwdev->chip->support_bands & BIT(band)))
			continue;

		new = skb_copy(skb, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out;
		}
		skb_put_data(new, ies->ies[band], ies->len[band]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(new);
			goto out;
		}

		list_add_tail(&info->list, &scan_info->pkt_list[band]);
		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
		if (ret)
			goto out;

		kfree_skb(new);
	}
out:
	return ret;
}

static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
					  struct rtw89_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct sk_buff *skb;
	u8 num = req->n_ssids, i;
	int ret;

	for (i = 0; i < num; i++) {
		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     req->ssids[i].ssid,
					     req->ssids[i].ssid_len,
					     req->ie_len);
		if (!skb)
			return -ENOMEM;

		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
		kfree_skb(skb);

		if (ret)
			return ret;
	}

	return 0;
}

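/* Fill one channel entry of the firmware scan-offload channel list.
 * RTW89_CHAN_OPERATE entries describe a return to the stored operating
 * channel and send a NULL frame instead of probe requests, RTW89_CHAN_DFS
 * entries get a capped period plus a dwell time, and when SSIDs were
 * requested the entry carries the IDs of the offloaded probe packets.
 */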
static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
				   int ssid_num,
				   struct rtw89_mac_chinfo *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_pktofld_info *info;
	u8 band, probe_count = 0;

	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;

	if (ssid_num) {
		ch_info->num_pkt = ssid_num;
		band = ch_info->ch_band;

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			ch_info->probe_id = info->id;
			ch_info->pkt_id[probe_count] = info->id;
			if (++probe_count >= ssid_num)
				break;
		}
		if (probe_count != ssid_num)
			rtw89_err(rtwdev, "SSID num differs from list len\n");
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
		ch_info->central_ch = scan_info->op_chan;
		ch_info->pri_ch = scan_info->op_pri_ch;
		ch_info->ch_band = scan_info->op_band;
		ch_info->bw = scan_info->op_bw;
		ch_info->tx_null = true;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
		ch_info->period = min_t(u8, ch_info->period,
					RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case RTW89_CHAN_ACTIVE:
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bound\n");
	}
}

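/* Translate the cfg80211 scan request into a firmware channel list. Radar
 * and no-IR channels become RTW89_CHAN_DFS entries; while the vif has a
 * link, an operating-channel entry is interleaved whenever the accumulated
 * off-channel time would exceed RTW89_OFF_CHAN_TIME.
 */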
static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
				       struct rtw89_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	int list_len = req->n_channels, off_chan_time = 0;
	enum rtw89_chan_type type;
	int ret = 0, i;

	INIT_LIST_HEAD(&chan_list);
	for (i = 0; i < req->n_channels; i++) {
		channel = req->channels[i];
		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		ch_info->period = req->duration_mandatory ?
				  req->duration : RTW89_CHANNEL_TIME;
		ch_info->ch_band = channel->band;
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;

		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);

		if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				ret = -ENOMEM;
				kfree(ch_info);
				goto out;
			}

			type = RTW89_CHAN_OPERATE;
			tmp->period = req->duration_mandatory ?
				      req->duration : RTW89_CHANNEL_TIME;
			rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
			list_add_tail(&tmp->list, &chan_list);
			off_chan_time = 0;
			list_len++;
		}
		list_add_tail(&ch_info->list, &chan_list);
		off_chan_time += ch_info->period;
	}
	rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
				   struct rtw89_vif *rtwvif)
{
	int ret;

	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
	if (ret) {
		rtw89_err(rtwdev, "Update probe request failed\n");
		goto out;
	}
	ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
out:
	return ret;
}

void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_scan_request *scan_req)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	u8 mac_addr[ETH_ALEN];

	rtwdev->scan_info.scanning_vif = vif;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;
	ieee80211_stop_queues(rtwdev->hw);

	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, vif->addr);
	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);

	rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
	rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
	rtw89_write32_mask(rtwdev,
			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
			   B_AX_RX_FLTR_CFG_MASK,
			   rtwdev->hal.rx_fltr);
}

void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			    bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};
	struct rtw89_vif *rtwvif;

	if (!vif)
		return;

	rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
	rtwdev->hal.rx_fltr |= B_AX_A_BC;
	rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
	rtw89_write32_mask(rtwdev,
			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
			   B_AX_RX_FLTR_CFG_MASK,
			   rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, vif, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);

	rtw89_release_pkt_list(rtwdev);
	rtwvif = (struct rtw89_vif *)vif->drv_priv;
	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	rtwdev->scan_info.scanning_vif = NULL;
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
	rtw89_hw_scan_offload(rtwdev, vif, false);
	rtw89_hw_scan_complete(rtwdev, vif, true);
}

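/* Ask firmware to start or stop the offloaded scan. When enabling, probe
 * request templates and the channel list are downloaded first through
 * rtw89_hw_scan_prehandle(); target_ch_mode is set when the vif still has a
 * link (net_type != RTW89_NET_TYPE_NO_LINK).
 */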
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			  bool enable)
{
	struct rtw89_scan_option opt = {0};
	struct rtw89_vif *rtwvif;
	int ret = 0;

	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
	if (!rtwvif)
		return -EINVAL;

	opt.enable = enable;
	opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
	if (enable) {
		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
		if (ret)
			goto out;
	}
	rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
out:
	return ret;
}

void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_hal *hal = &rtwdev->hal;

	scan_info->op_pri_ch = hal->current_primary_channel;
	scan_info->op_chan = hal->current_channel;
	scan_info->op_bw = hal->current_band_width;
	scan_info->op_band = hal->current_band_type;
}
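
/* Illustrative call flow (a sketch; the callers live outside this file and
 * are assumptions, not part of this source):
 *
 *	rtw89_hw_scan_start(rtwdev, vif, scan_req);
 *	ret = rtw89_hw_scan_offload(rtwdev, vif, true);
 *
 * The scan is presumably finished either by the firmware's scan-offload
 * completion event or by rtw89_hw_scan_abort(), both ending in
 * rtw89_hw_scan_complete(). rtw89_store_op_chan() snapshots the current
 * operating channel so that RTW89_CHAN_OPERATE entries can return to it.
 */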