// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "util.h"

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
			       struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info;
	const u8 *fw_end = fw + len;
	const u8 *fwdynhdr;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = GET_FW_HDR_SEC_NUM(fw);
	base_hdr_len = RTW89_FW_HDR_SIZE +
		       info->section_num * RTW89_FW_SECTION_HDR_SIZE;
	info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw);

	if (info->dynamic_hdr_en) {
		info->hdr_len = GET_FW_HDR_LEN(fw);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = fw + base_hdr_len;
		if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	fw += RTW89_FW_HDR_SIZE;
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc = GET_FWSECTION_HDR_MSSC(fw);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
		if (GET_FWSECTION_HDR_CHECKSUM(fw))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = GET_FWSECTION_HDR_REDL(fw);
		section_info->dladdr =
			GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		fw += RTW89_FW_SECTION_HDR_SIZE;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u8 *mfw = fw_info->firmware->data;
	u32 mfw_len = fw_info->firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->cv != rtwdev->hal.cv ||
		    mfw_info->type != type ||
		    mfw_info->mp)
			continue;

		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
		fw_suit->size = le32_to_cpu(mfw_info->size);
		return 0;
	}

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;
}

static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
				enum rtw89_fw_type type,
				struct rtw89_fw_suit *fw_suit)
{
	const u8 *hdr = fw_suit->data;

	fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
	fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
	fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
	fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
	fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
	fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
	fw_suit->build_date = GET_FW_HDR_DATE(hdr);
	fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
	fw_suit->build_min = GET_FW_HDR_MIN(hdr);
	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	rtw89_fw_update_ver(rtwdev, type, fw_suit);

	return 0;
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
};

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct __fw_feat_cfg *ent;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;
	int i;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		ent = &fw_feat_tbl[i];
		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(suit_ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
	}
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 u32 *early_feat_map)
{
	union rtw89_compat_fw_hdr buf = {};
	const struct firmware *firmware;
	bool full_req = false;
	u32 ver_code;
	int ret;
	int i;

	/* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will
	 * be denied (-EPERM), and we won't get the firmware contents we
	 * expect. So, in this case, we have to request the full firmware here.
	 */
	if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE))
		full_req = true;

	if (full_req)
		ret = request_firmware(&firmware, chip->fw_name, device);
	else
		ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
							device, &buf, sizeof(buf),
							0);

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	if (full_req)
		ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
	else
		ver_code = rtw89_compat_fw_hdr_ver_code(&buf);

	if (!ver_code)
		goto out;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			*early_feat_map |= BIT(ent->feature);
	}

out:
	if (full_req)
		return firmware;

	release_firmware(firmware);
	return NULL;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works even if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	u8 val;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	struct rtw89_fw_bin_info info;
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;
	u8 val;
	int ret;

	rtw89_mac_disable_cpu(rtwdev);
	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
	if (ret)
		return ret;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
		return -ENOENT;
	}

	ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		goto fwdl_err;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		goto fwdl_err;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	ret = rtw89_fw_download_main(rtwdev, fw, &info);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->completion);
	if (!fw->firmware)
		return -EINVAL;

	return 0;
}

static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
{
	struct rtw89_fw_info *fw = context;
	struct rtw89_dev *rtwdev = fw->rtwdev;

	if (!firmware || !firmware->data) {
		rtw89_err(rtwdev, "failed to request firmware\n");
		complete_all(&fw->completion);
		return;
	}

	fw->firmware = firmware;
	complete_all(&fw->completion);
}

int rtw89_load_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;
	const char *fw_name = rtwdev->chip->fw_name;
	int ret;

	fw->rtwdev = rtwdev;
	init_completion(&fw->completion);

	if (fw->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&fw->completion);
		return 0;
	}

	ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
				      GFP_KERNEL, fw, rtw89_load_firmware_cb);
	if (ret) {
		rtw89_err(rtwdev, "failed to async firmware request\n");
		return ret;
	}

	return 0;
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	rtw89_wait_firmware_completion(rtwdev);

	if (fw->firmware) {
		release_firmware(fw->firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
		fw->firmware = NULL;
	}
}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      H2C_DCTL_SEC_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	u8 macid = rtwsta->mac_id;
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (chip->bacam_v1)
		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	else
		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 1);
	SET_BA_CAM_SSN(skb->data, params->ssn);

	if (chip->bacam_v1) {
		SET_BA_CAM_STD_EN(skb->data, 1);
		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
					       u8 entry_idx, u8 uid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);

	SET_BA_CAM_VALID(skb->data, 1);
	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	SET_BA_CAM_UID(skb->data, uid);
	SET_BA_CAM_BAND(skb->data, 0);
	SET_BA_CAM_STD_EN(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
					struct rtw89_vif *rtwvif,
					enum rtw89_fw_pkt_ofld_type type,
					u8 *id)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_pktofld_info *info;
	struct sk_buff *skb;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	switch (type) {
	case RTW89_PKT_OFLD_TYPE_PS_POLL:
		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
		break;
	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
		break;
	default:
		goto err;
	}

	if (!skb)
		goto err;

	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
	kfree_skb(skb);

	if (ret)
		goto err;

	list_add_tail(&info->list, &rtwvif->general_pkt_list);
	*id = info->id;
	return 0;

err:
	kfree(info);
	return -ENOMEM;
}

void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
					   struct rtw89_vif *rtwvif, bool notify_fw)
{
	struct list_head *pkt_list = &rtwvif->general_pkt_list;
	struct rtw89_pktofld_info *info, *tmp;

	list_for_each_entry_safe(info, tmp, pkt_list, list) {
		if (notify_fw)
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
		rtw89_core_release_bit_map(rtwdev->pkt_offload,
					   info->id);
		list_del(&info->list);
		kfree(info);
	}
}

void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
{
	struct rtw89_vif *rtwvif;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
}

#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
			     struct rtw89_vif *rtwvif, u8 macid)
{
	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
	struct sk_buff *skb;
	int ret;

	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
	u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;

	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}

#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_sta *sta, u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	if (!sta->deflink.he_cap.has_he)
		return;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = 2;
		else if (ppe8 != 7)
			pads[i] = 1;
		else
			pads[i] = 0;
	}
}

int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));
	if (sta)
		__get_sta_he_pkt_padding(rtwdev, sta, pads);

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (sta)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  sta->deflink.he_cap.has_he);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
dl\n"); 1354 return -ENOMEM; 1355 } 1356 skb_put(skb, H2C_CMC_TBL_LEN); 1357 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1358 SET_CTRL_INFO_OPERATION(skb->data, 1); 1359 if (rtwsta->cctl_tx_time) { 1360 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1361 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1362 } 1363 if (rtwsta->cctl_tx_retry_limit) { 1364 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1365 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1366 } 1367 1368 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1369 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1370 chip->h2c_cctl_func_id, 0, 1, 1371 H2C_CMC_TBL_LEN); 1372 1373 ret = rtw89_h2c_tx(rtwdev, skb, false); 1374 if (ret) { 1375 rtw89_err(rtwdev, "failed to send h2c\n"); 1376 goto fail; 1377 } 1378 1379 return 0; 1380 fail: 1381 dev_kfree_skb_any(skb); 1382 1383 return ret; 1384 } 1385 1386 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1387 struct rtw89_sta *rtwsta) 1388 { 1389 const struct rtw89_chip_info *chip = rtwdev->chip; 1390 struct sk_buff *skb; 1391 int ret; 1392 1393 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 1394 return 0; 1395 1396 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1397 if (!skb) { 1398 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1399 return -ENOMEM; 1400 } 1401 skb_put(skb, H2C_CMC_TBL_LEN); 1402 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1403 SET_CTRL_INFO_OPERATION(skb->data, 1); 1404 1405 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1406 1407 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1408 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1409 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 1410 H2C_CMC_TBL_LEN); 1411 1412 ret = rtw89_h2c_tx(rtwdev, skb, false); 1413 if (ret) { 1414 rtw89_err(rtwdev, "failed to send h2c\n"); 1415 goto fail; 1416 } 1417 1418 return 0; 1419 fail: 1420 dev_kfree_skb_any(skb); 1421 1422 return ret; 1423 } 1424 1425 #define H2C_BCN_BASE_LEN 12 1426 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 1427 struct rtw89_vif *rtwvif) 1428 { 1429 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 1430 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1431 struct sk_buff *skb; 1432 struct sk_buff *skb_beacon; 1433 u16 tim_offset; 1434 int bcn_total_len; 1435 u16 beacon_rate; 1436 int ret; 1437 1438 if (vif->p2p) 1439 beacon_rate = RTW89_HW_RATE_OFDM6; 1440 else if (chan->band_type == RTW89_BAND_2G) 1441 beacon_rate = RTW89_HW_RATE_CCK1; 1442 else 1443 beacon_rate = RTW89_HW_RATE_OFDM6; 1444 1445 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 1446 NULL, 0); 1447 if (!skb_beacon) { 1448 rtw89_err(rtwdev, "failed to get beacon skb\n"); 1449 return -ENOMEM; 1450 } 1451 1452 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 1453 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 1454 if (!skb) { 1455 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1456 dev_kfree_skb_any(skb_beacon); 1457 return -ENOMEM; 1458 } 1459 skb_put(skb, H2C_BCN_BASE_LEN); 1460 1461 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 1462 SET_BCN_UPD_MBSSID(skb->data, 0); 1463 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 1464 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 1465 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 1466 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 1467 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 1468 SET_BCN_UPD_RATE(skb->data, beacon_rate); 1469 1470 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 1471 dev_kfree_skb_any(skb_beacon); 1472 1473 
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif,
			       struct rtw89_sta *rtwsta,
			       enum rtw89_upd_mode upd_mode)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role;
	int ret;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
		if (rtwsta)
			self_role = RTW89_SELF_ROLE_AP_CLIENT;
		else
			self_role = rtwvif->self_role;
	} else {
		self_role = rtwvif->self_role;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
			      H2C_ROLE_MAINTAIN_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   struct rtw89_sta *rtwsta, bool dis_conn)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role = rtwvif->self_role;
	u8 net_type = rtwvif->net_type;
	int ret;

	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
		self_role = RTW89_SELF_ROLE_AP_CLIENT;
		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_JOIN_INFO_LEN);
	SET_JOININFO_MACID(skb->data, mac_id);
	SET_JOININFO_OP(skb->data, dis_conn);
	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
	SET_JOININFO_ISHESTA(skb->data, 0);
	SET_JOININFO_DLBW(skb->data, 0);
	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
	SET_JOININFO_DL_T_PE(skb->data, 0);
	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
	SET_JOININFO_NET_TYPE(skb->data, net_type);
	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
	SET_JOININFO_SELF_ROLE(skb->data, self_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      H2C_JOIN_INFO_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_grp h2c = {{0}};
	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
	if (pause)
		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
	skb_put_data(skb, &h2c, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_EDCA_LEN 12
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			  u8 ac, u32 val)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_EDCA_LEN);
	RTW89_SET_EDCA_SEL(skb->data, 0);
	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
	RTW89_SET_EDCA_WMM(skb->data, 0);
	RTW89_SET_EDCA_AC(skb->data, ac);
	RTW89_SET_EDCA_PARAM(skb->data, val);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_USR_EDCA, 0, 1,
			      H2C_EDCA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_TSF32_TOGL_LEN 4
int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			      bool en)
{
	struct sk_buff *skb;
	u16 early_us = en ? 2000 : 0;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_TSF32_TOGL_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TSF32_TOGL, 0, 0,
			      H2C_TSF32_TOGL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_OFLD_CFG_LEN 8
int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
		return -ENOMEM;
	}
	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_CFG, 0, 1,
			      H2C_OFLD_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_RA_LEN 16
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
{
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_RA_LEN);
	cmd = skb->data;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra cmd msk: %llx ", ra->ra_mask);

	RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
	RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
	RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
	RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
	RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
	RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
	RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
	RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
	RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
	RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
	RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
	RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
	RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
	RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
	RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
	RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
	RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);

	if (csi) {
		RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
		RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
		RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
		RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
		RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
		RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
		RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
			      H2C_RA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVHDR 2
#define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_dm *dm = &btc->dm;
	struct rtw89_btc_init_info *init_info = &dm->init_info;
	struct rtw89_btc_module *module = &init_info->module;
	struct rtw89_btc_ant_info *ant = &module->ant;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
	RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
	RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
	RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
	RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);

	RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
	RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
	RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);

	RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
	RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
	RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
	RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
	RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
	RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_INIT);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
	(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)

int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role *active = role_info->active_role;
	struct sk_buff *skb;
	u32 len;
	u8 offset = 0;
	u8 *cmd;
	int ret;
	int i;

	len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
	(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)

int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
	struct sk_buff *skb;
	u32 len;
	u8 *cmd, offset;
	int ret;
	int i;

	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);

H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 1966 1967 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1968 if (!skb) { 1969 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 1970 return -ENOMEM; 1971 } 1972 skb_put(skb, len); 1973 cmd = skb->data; 1974 1975 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 1976 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 1977 1978 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 1979 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 1980 1981 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 1982 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 1983 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 1984 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 1985 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 1986 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 1987 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 1988 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 1989 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 1990 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 1991 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 1992 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 1993 1994 offset = PORT_DATA_OFFSET; 1995 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 1996 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 1997 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 1998 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 1999 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2000 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2001 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2002 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2003 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2004 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2005 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2006 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2007 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2008 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2009 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2010 } 2011 2012 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2013 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2014 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2015 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2016 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2017 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2018 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2019 2020 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2021 H2C_CAT_OUTSRC, BTFC_SET, 2022 SET_DRV_INFO, 0, 0, 2023 len); 2024 2025 ret = rtw89_h2c_tx(rtwdev, skb, false); 2026 if (ret) { 2027 rtw89_err(rtwdev, "failed to send h2c\n"); 2028 goto fail; 2029 } 2030 2031 return 0; 2032 fail: 2033 dev_kfree_skb_any(skb); 2034 2035 return ret; 2036 } 2037 2038 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2039 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2040 { 2041 struct rtw89_btc *btc = &rtwdev->btc; 2042 const struct rtw89_btc_ver *ver = btc->ver; 2043 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2044 struct sk_buff *skb; 2045 u8 *cmd; 2046 int ret; 2047 2048 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
2049 if (!skb) {
2050 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
2051 return -ENOMEM;
2052 }
2053 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
2054 cmd = skb->data;
2055
2056 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
2057 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
2058
2059 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
2060 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
2061 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
2062 if (ver->fcxctrl == 0)
2063 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
2064
2065 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2066 H2C_CAT_OUTSRC, BTFC_SET,
2067 SET_DRV_INFO, 0, 0,
2068 H2C_LEN_CXDRVINFO_CTRL);
2069
2070 ret = rtw89_h2c_tx(rtwdev, skb, false);
2071 if (ret) {
2072 rtw89_err(rtwdev, "failed to send h2c\n");
2073 goto fail;
2074 }
2075
2076 return 0;
2077 fail:
2078 dev_kfree_skb_any(skb);
2079
2080 return ret;
2081 }
2082
2083 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
2084 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
2085 {
2086 struct rtw89_btc *btc = &rtwdev->btc;
2087 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2088 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
2089 struct sk_buff *skb;
2090 u8 *cmd;
2091 int ret;
2092
2093 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
2094 if (!skb) {
2095 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
2096 return -ENOMEM;
2097 }
2098 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
2099 cmd = skb->data;
2100
2101 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
2102 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
2103
2104 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
2105 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
2106 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
2107 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
2108 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
2109
2110 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2111 H2C_CAT_OUTSRC, BTFC_SET,
2112 SET_DRV_INFO, 0, 0,
2113 H2C_LEN_CXDRVINFO_RFK);
2114
2115 ret = rtw89_h2c_tx(rtwdev, skb, false);
2116 if (ret) {
2117 rtw89_err(rtwdev, "failed to send h2c\n");
2118 goto fail;
2119 }
2120
2121 return 0;
2122 fail:
2123 dev_kfree_skb_any(skb);
2124
2125 return ret;
2126 }
2127
2128 #define H2C_LEN_PKT_OFLD 4
2129 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
2130 {
2131 struct sk_buff *skb;
2132 u8 *cmd;
2133 int ret;
2134
2135 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
2136 if (!skb) {
2137 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2138 return -ENOMEM;
2139 }
2140 skb_put(skb, H2C_LEN_PKT_OFLD);
2141 cmd = skb->data;
2142
2143 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
2144 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
2145
2146 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2147 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2148 H2C_FUNC_PACKET_OFLD, 1, 1,
2149 H2C_LEN_PKT_OFLD);
2150
2151 ret = rtw89_h2c_tx(rtwdev, skb, false);
2152 if (ret) {
2153 rtw89_err(rtwdev, "failed to send h2c\n");
2154 goto fail;
2155 }
2156
2157 return 0;
2158 fail:
2159 dev_kfree_skb_any(skb);
2160
2161 return ret;
2162 }
2163
2164 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
2165 struct sk_buff *skb_ofld)
2166 {
2167 struct sk_buff *skb;
2168 u8 *cmd;
2169 u8 alloc_id;
2170 int ret;
2171
2172
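/* Flow of the add path below: reserve a free index from the pkt_offload
 * bitmap first, append the offload payload from skb_ofld behind the fixed
 * H2C_LEN_PKT_OFLD header fields, and release the index again on any
 * failure so it can be reused.
 */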
alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2173 RTW89_MAX_PKT_OFLD_NUM); 2174 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2175 return -ENOSPC; 2176 2177 *id = alloc_id; 2178 2179 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2180 if (!skb) { 2181 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2182 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2183 return -ENOMEM; 2184 } 2185 skb_put(skb, H2C_LEN_PKT_OFLD); 2186 cmd = skb->data; 2187 2188 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2189 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2190 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 2191 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 2192 2193 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2194 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2195 H2C_FUNC_PACKET_OFLD, 1, 1, 2196 H2C_LEN_PKT_OFLD + skb_ofld->len); 2197 2198 ret = rtw89_h2c_tx(rtwdev, skb, false); 2199 if (ret) { 2200 rtw89_err(rtwdev, "failed to send h2c\n"); 2201 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2202 goto fail; 2203 } 2204 2205 return 0; 2206 fail: 2207 dev_kfree_skb_any(skb); 2208 2209 return ret; 2210 } 2211 2212 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 2213 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 2214 struct list_head *chan_list) 2215 { 2216 struct rtw89_mac_chinfo *ch_info; 2217 struct sk_buff *skb; 2218 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 2219 u8 *cmd; 2220 int ret; 2221 2222 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 2223 if (!skb) { 2224 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 2225 return -ENOMEM; 2226 } 2227 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 2228 cmd = skb->data; 2229 2230 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 2231 /* in unit of 4 bytes */ 2232 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 2233 2234 list_for_each_entry(ch_info, chan_list, list) { 2235 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 2236 2237 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 2238 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 2239 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 2240 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 2241 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 2242 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 2243 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 2244 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 2245 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 2246 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 2247 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 2248 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 2249 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 2250 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 2251 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 2252 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 2253 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 2254 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 2255 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 2256 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 2257 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 2258 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 2259 } 2260 2261 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2262 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2263 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 2264 2265 ret = 
rtw89_h2c_tx(rtwdev, skb, false); 2266 if (ret) { 2267 rtw89_err(rtwdev, "failed to send h2c\n"); 2268 goto fail; 2269 } 2270 2271 return 0; 2272 fail: 2273 dev_kfree_skb_any(skb); 2274 2275 return ret; 2276 } 2277 2278 #define H2C_LEN_SCAN_OFFLOAD 28 2279 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 2280 struct rtw89_scan_option *option, 2281 struct rtw89_vif *rtwvif) 2282 { 2283 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2284 struct sk_buff *skb; 2285 u8 *cmd; 2286 int ret; 2287 2288 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD); 2289 if (!skb) { 2290 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 2291 return -ENOMEM; 2292 } 2293 skb_put(skb, H2C_LEN_SCAN_OFFLOAD); 2294 cmd = skb->data; 2295 2296 RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id); 2297 RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port); 2298 RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0); 2299 RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable); 2300 RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true); 2301 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode); 2302 RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE); 2303 RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE); 2304 if (option->target_ch_mode) { 2305 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw); 2306 RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd, 2307 scan_info->op_pri_ch); 2308 RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd, 2309 scan_info->op_chan); 2310 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd, 2311 scan_info->op_band); 2312 } 2313 2314 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2315 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2316 H2C_FUNC_SCANOFLD, 1, 1, 2317 H2C_LEN_SCAN_OFFLOAD); 2318 2319 ret = rtw89_h2c_tx(rtwdev, skb, false); 2320 if (ret) { 2321 rtw89_err(rtwdev, "failed to send h2c\n"); 2322 goto fail; 2323 } 2324 2325 return 0; 2326 fail: 2327 dev_kfree_skb_any(skb); 2328 2329 return ret; 2330 } 2331 2332 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 2333 struct rtw89_fw_h2c_rf_reg_info *info, 2334 u16 len, u8 page) 2335 { 2336 struct sk_buff *skb; 2337 u8 class = info->rf_path == RF_PATH_A ? 
2338 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
2339 int ret;
2340
2341 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2342 if (!skb) {
2343 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
2344 return -ENOMEM;
2345 }
2346 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
2347
2348 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2349 H2C_CAT_OUTSRC, class, page, 0, 0,
2350 len);
2351
2352 ret = rtw89_h2c_tx(rtwdev, skb, false);
2353 if (ret) {
2354 rtw89_err(rtwdev, "failed to send h2c\n");
2355 goto fail;
2356 }
2357
2358 return 0;
2359 fail:
2360 dev_kfree_skb_any(skb);
2361
2362 return ret;
2363 }
2364
2365 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
2366 {
2367 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2368 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
2369 struct rtw89_fw_h2c_rf_get_mccch *mccch;
2370 struct sk_buff *skb;
2371 int ret;
2372
2373 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
2374 if (!skb) {
2375 rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
2376 return -ENOMEM;
2377 }
2378 skb_put(skb, sizeof(*mccch));
2379 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
2380
2381 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
2382 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
2383 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
2384 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
2385 mccch->current_channel = cpu_to_le32(chan->channel);
2386 mccch->current_band_type = cpu_to_le32(chan->band_type);
2387
2388 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2389 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
2390 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
2391 sizeof(*mccch));
2392
2393 ret = rtw89_h2c_tx(rtwdev, skb, false);
2394 if (ret) {
2395 rtw89_err(rtwdev, "failed to send h2c\n");
2396 goto fail;
2397 }
2398
2399 return 0;
2400 fail:
2401 dev_kfree_skb_any(skb);
2402
2403 return ret;
2404 }
2405 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
2406
2407 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
2408 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
2409 bool rack, bool dack)
2410 {
2411 struct sk_buff *skb;
2412 int ret;
2413
2414 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2415 if (!skb) {
2416 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
2417 return -ENOMEM;
2418 }
2419 skb_put_data(skb, buf, len);
2420
2421 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2422 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
2423 len);
2424
2425 ret = rtw89_h2c_tx(rtwdev, skb, false);
2426 if (ret) {
2427 rtw89_err(rtwdev, "failed to send h2c\n");
2428 goto fail;
2429 }
2430
2431 return 0;
2432 fail:
2433 dev_kfree_skb_any(skb);
2434
2435 return ret;
2436 }
2437
2438 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
2439 {
2440 struct sk_buff *skb;
2441 int ret;
2442
2443 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
2444 if (!skb) {
2445 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
2446 return -ENOMEM;
2447 }
2448 skb_put_data(skb, buf, len);
2449
2450 ret = rtw89_h2c_tx(rtwdev, skb, false);
2451 if (ret) {
2452 rtw89_err(rtwdev, "failed to send h2c\n");
2453 goto fail;
2454 }
2455
2456 return 0;
2457 fail:
2458 dev_kfree_skb_any(skb);
2459
2460 return ret;
2461 }
2462
2463 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
2464 {
2465 struct rtw89_early_h2c *early_h2c;
2466
2467 lockdep_assert_held(&rtwdev->mutex);
2468
2469 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
2470
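/* Entries are sent in list order here; a failed send is only reported by
 * rtw89_fw_h2c_raw() itself, and the entries stay on early_h2c_list until
 * rtw89_fw_free_all_early_h2c() releases them.
 */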
rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 2471 } 2472 } 2473 2474 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 2475 { 2476 struct rtw89_early_h2c *early_h2c, *tmp; 2477 2478 mutex_lock(&rtwdev->mutex); 2479 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 2480 list_del(&early_h2c->list); 2481 kfree(early_h2c->h2c); 2482 kfree(early_h2c); 2483 } 2484 mutex_unlock(&rtwdev->mutex); 2485 } 2486 2487 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 2488 { 2489 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2490 2491 attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); 2492 attr->class = RTW89_GET_C2H_CLASS(c2h->data); 2493 attr->func = RTW89_GET_C2H_FUNC(c2h->data); 2494 attr->len = RTW89_GET_C2H_LEN(c2h->data); 2495 } 2496 2497 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 2498 struct sk_buff *c2h) 2499 { 2500 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2501 u8 category = attr->category; 2502 u8 class = attr->class; 2503 u8 func = attr->func; 2504 2505 switch (category) { 2506 default: 2507 return false; 2508 case RTW89_C2H_CAT_MAC: 2509 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 2510 } 2511 } 2512 2513 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 2514 { 2515 rtw89_fw_c2h_parse_attr(c2h); 2516 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 2517 goto enqueue; 2518 2519 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 2520 dev_kfree_skb_any(c2h); 2521 return; 2522 2523 enqueue: 2524 skb_queue_tail(&rtwdev->c2h_queue, c2h); 2525 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 2526 } 2527 2528 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 2529 struct sk_buff *skb) 2530 { 2531 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 2532 u8 category = attr->category; 2533 u8 class = attr->class; 2534 u8 func = attr->func; 2535 u16 len = attr->len; 2536 bool dump = true; 2537 2538 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2539 return; 2540 2541 switch (category) { 2542 case RTW89_C2H_CAT_TEST: 2543 break; 2544 case RTW89_C2H_CAT_MAC: 2545 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 2546 if (class == RTW89_MAC_C2H_CLASS_INFO && 2547 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 2548 dump = false; 2549 break; 2550 case RTW89_C2H_CAT_OUTSRC: 2551 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 2552 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 2553 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 2554 else 2555 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 2556 break; 2557 } 2558 2559 if (dump) 2560 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 2561 } 2562 2563 void rtw89_fw_c2h_work(struct work_struct *work) 2564 { 2565 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2566 c2h_work); 2567 struct sk_buff *skb, *tmp; 2568 2569 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 2570 skb_unlink(skb, &rtwdev->c2h_queue); 2571 mutex_lock(&rtwdev->mutex); 2572 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 2573 mutex_unlock(&rtwdev->mutex); 2574 dev_kfree_skb_any(skb); 2575 } 2576 } 2577 2578 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 2579 struct rtw89_mac_h2c_info *info) 2580 { 2581 const struct rtw89_chip_info *chip = rtwdev->chip; 2582 const u32 *h2c_reg = chip->h2c_regs; 2583 u8 i, val, len; 2584 int ret; 2585 2586 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 2587 rtwdev, chip->h2c_ctrl_reg); 2588 if (ret) { 2589 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 2590 return 
ret; 2591 } 2592 2593 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 2594 sizeof(info->h2creg[0])); 2595 2596 RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id); 2597 RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len); 2598 for (i = 0; i < RTW89_H2CREG_MAX; i++) 2599 rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]); 2600 2601 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 2602 2603 return 0; 2604 } 2605 2606 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 2607 struct rtw89_mac_c2h_info *info) 2608 { 2609 const struct rtw89_chip_info *chip = rtwdev->chip; 2610 const u32 *c2h_reg = chip->c2h_regs; 2611 u32 ret; 2612 u8 i, val; 2613 2614 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 2615 2616 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 2617 RTW89_C2H_TIMEOUT, false, rtwdev, 2618 chip->c2h_ctrl_reg); 2619 if (ret) { 2620 rtw89_warn(rtwdev, "c2h reg timeout\n"); 2621 return ret; 2622 } 2623 2624 for (i = 0; i < RTW89_C2HREG_MAX; i++) 2625 info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 2626 2627 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 2628 2629 info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg); 2630 info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) - 2631 RTW89_C2HREG_HDR_LEN; 2632 2633 return 0; 2634 } 2635 2636 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 2637 struct rtw89_mac_h2c_info *h2c_info, 2638 struct rtw89_mac_c2h_info *c2h_info) 2639 { 2640 u32 ret; 2641 2642 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 2643 lockdep_assert_held(&rtwdev->mutex); 2644 2645 if (!h2c_info && !c2h_info) 2646 return -EINVAL; 2647 2648 if (!h2c_info) 2649 goto recv_c2h; 2650 2651 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 2652 if (ret) 2653 return ret; 2654 2655 recv_c2h: 2656 if (!c2h_info) 2657 return 0; 2658 2659 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 2660 if (ret) 2661 return ret; 2662 2663 return 0; 2664 } 2665 2666 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 2667 { 2668 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 2669 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 2670 return; 2671 } 2672 2673 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 2674 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 2675 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 2676 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 2677 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 2678 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 2679 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 2680 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 2681 2682 rtw89_fw_prog_cnt_dump(rtwdev); 2683 } 2684 2685 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 2686 { 2687 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 2688 struct rtw89_pktofld_info *info, *tmp; 2689 u8 idx; 2690 2691 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 2692 if (!(rtwdev->chip->support_bands & BIT(idx))) 2693 continue; 2694 2695 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 2696 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2697 rtw89_core_release_bit_map(rtwdev->pkt_offload, 2698 info->id); 2699 list_del(&info->list); 2700 kfree(info); 2701 } 2702 } 2703 } 2704 2705 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 2706 struct rtw89_vif *rtwvif, 2707 struct sk_buff *skb) 2708 { 2709 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2710 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 2711 
struct rtw89_pktofld_info *info; 2712 struct sk_buff *new; 2713 int ret = 0; 2714 u8 band; 2715 2716 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 2717 if (!(rtwdev->chip->support_bands & BIT(band))) 2718 continue; 2719 2720 new = skb_copy(skb, GFP_KERNEL); 2721 if (!new) { 2722 ret = -ENOMEM; 2723 goto out; 2724 } 2725 skb_put_data(new, ies->ies[band], ies->len[band]); 2726 skb_put_data(new, ies->common_ies, ies->common_ie_len); 2727 2728 info = kzalloc(sizeof(*info), GFP_KERNEL); 2729 if (!info) { 2730 ret = -ENOMEM; 2731 kfree_skb(new); 2732 goto out; 2733 } 2734 2735 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 2736 if (ret) { 2737 kfree_skb(new); 2738 kfree(info); 2739 goto out; 2740 } 2741 2742 list_add_tail(&info->list, &scan_info->pkt_list[band]); 2743 kfree_skb(new); 2744 } 2745 out: 2746 return ret; 2747 } 2748 2749 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 2750 struct rtw89_vif *rtwvif) 2751 { 2752 struct cfg80211_scan_request *req = rtwvif->scan_req; 2753 struct sk_buff *skb; 2754 u8 num = req->n_ssids, i; 2755 int ret; 2756 2757 for (i = 0; i < num; i++) { 2758 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 2759 req->ssids[i].ssid, 2760 req->ssids[i].ssid_len, 2761 req->ie_len); 2762 if (!skb) 2763 return -ENOMEM; 2764 2765 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb); 2766 kfree_skb(skb); 2767 2768 if (ret) 2769 return ret; 2770 } 2771 2772 return 0; 2773 } 2774 2775 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 2776 int ssid_num, 2777 struct rtw89_mac_chinfo *ch_info) 2778 { 2779 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2780 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 2781 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2782 struct cfg80211_scan_request *req = rtwvif->scan_req; 2783 struct rtw89_pktofld_info *info; 2784 u8 band, probe_count = 0; 2785 2786 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 2787 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 2788 ch_info->bw = RTW89_SCAN_WIDTH; 2789 ch_info->tx_pkt = true; 2790 ch_info->cfg_tx_pwr = false; 2791 ch_info->tx_pwr_idx = 0; 2792 ch_info->tx_null = false; 2793 ch_info->pause_data = false; 2794 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 2795 2796 if (ssid_num) { 2797 ch_info->num_pkt = ssid_num; 2798 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 2799 2800 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 2801 ch_info->pkt_id[probe_count] = info->id; 2802 if (++probe_count >= ssid_num) 2803 break; 2804 } 2805 if (probe_count != ssid_num) 2806 rtw89_err(rtwdev, "SSID num differs from list len\n"); 2807 } 2808 2809 if (ch_info->ch_band == RTW89_BAND_6G) { 2810 if (ssid_num == 1 && req->ssids[0].ssid_len == 0) { 2811 ch_info->tx_pkt = false; 2812 if (!req->duration_mandatory) 2813 ch_info->period -= RTW89_DWELL_TIME_6G; 2814 } 2815 } 2816 2817 switch (chan_type) { 2818 case RTW89_CHAN_OPERATE: 2819 ch_info->central_ch = scan_info->op_chan; 2820 ch_info->pri_ch = scan_info->op_pri_ch; 2821 ch_info->ch_band = scan_info->op_band; 2822 ch_info->bw = scan_info->op_bw; 2823 ch_info->tx_null = true; 2824 ch_info->num_pkt = 0; 2825 break; 2826 case RTW89_CHAN_DFS: 2827 if (ch_info->ch_band != RTW89_BAND_6G) 2828 ch_info->period = max_t(u8, ch_info->period, 2829 RTW89_DFS_CHAN_TIME); 2830 ch_info->dwell_time = RTW89_DWELL_TIME; 2831 break; 2832 case RTW89_CHAN_ACTIVE: 2833 break; 2834 default: 2835 rtw89_err(rtwdev, "Channel type out of bound\n"); 2836 
} 2837 } 2838 2839 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 2840 struct rtw89_vif *rtwvif) 2841 { 2842 struct cfg80211_scan_request *req = rtwvif->scan_req; 2843 struct rtw89_mac_chinfo *ch_info, *tmp; 2844 struct ieee80211_channel *channel; 2845 struct list_head chan_list; 2846 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 2847 int list_len, off_chan_time = 0; 2848 enum rtw89_chan_type type; 2849 int ret = 0; 2850 u32 idx; 2851 2852 INIT_LIST_HEAD(&chan_list); 2853 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 2854 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 2855 idx++, list_len++) { 2856 channel = req->channels[idx]; 2857 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 2858 if (!ch_info) { 2859 ret = -ENOMEM; 2860 goto out; 2861 } 2862 2863 if (req->duration_mandatory) 2864 ch_info->period = req->duration; 2865 else if (channel->band == NL80211_BAND_6GHZ) 2866 ch_info->period = RTW89_CHANNEL_TIME_6G + 2867 RTW89_DWELL_TIME_6G; 2868 else 2869 ch_info->period = RTW89_CHANNEL_TIME; 2870 2871 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 2872 ch_info->central_ch = channel->hw_value; 2873 ch_info->pri_ch = channel->hw_value; 2874 ch_info->rand_seq_num = random_seq; 2875 2876 if (channel->flags & 2877 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 2878 type = RTW89_CHAN_DFS; 2879 else 2880 type = RTW89_CHAN_ACTIVE; 2881 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 2882 2883 if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK && 2884 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 2885 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2886 if (!tmp) { 2887 ret = -ENOMEM; 2888 kfree(ch_info); 2889 goto out; 2890 } 2891 2892 type = RTW89_CHAN_OPERATE; 2893 tmp->period = req->duration_mandatory ? 
2894 req->duration : RTW89_CHANNEL_TIME; 2895 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 2896 list_add_tail(&tmp->list, &chan_list); 2897 off_chan_time = 0; 2898 list_len++; 2899 } 2900 list_add_tail(&ch_info->list, &chan_list); 2901 off_chan_time += ch_info->period; 2902 } 2903 rtwdev->scan_info.last_chan_idx = idx; 2904 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 2905 2906 out: 2907 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 2908 list_del(&ch_info->list); 2909 kfree(ch_info); 2910 } 2911 2912 return ret; 2913 } 2914 2915 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 2916 struct rtw89_vif *rtwvif) 2917 { 2918 int ret; 2919 2920 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 2921 if (ret) { 2922 rtw89_err(rtwdev, "Update probe request failed\n"); 2923 goto out; 2924 } 2925 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif); 2926 out: 2927 return ret; 2928 } 2929 2930 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2931 struct ieee80211_scan_request *scan_req) 2932 { 2933 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2934 struct cfg80211_scan_request *req = &scan_req->req; 2935 u32 rx_fltr = rtwdev->hal.rx_fltr; 2936 u8 mac_addr[ETH_ALEN]; 2937 2938 rtwdev->scan_info.scanning_vif = vif; 2939 rtwdev->scan_info.last_chan_idx = 0; 2940 rtwvif->scan_ies = &scan_req->ies; 2941 rtwvif->scan_req = req; 2942 ieee80211_stop_queues(rtwdev->hw); 2943 2944 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 2945 get_random_mask_addr(mac_addr, req->mac_addr, 2946 req->mac_addr_mask); 2947 else 2948 ether_addr_copy(mac_addr, vif->addr); 2949 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 2950 2951 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 2952 rx_fltr &= ~B_AX_A_BC; 2953 rx_fltr &= ~B_AX_A_A1_MATCH; 2954 rtw89_write32_mask(rtwdev, 2955 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 2956 B_AX_RX_FLTR_CFG_MASK, 2957 rx_fltr); 2958 } 2959 2960 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2961 bool aborted) 2962 { 2963 struct cfg80211_scan_info info = { 2964 .aborted = aborted, 2965 }; 2966 struct rtw89_vif *rtwvif; 2967 2968 if (!vif) 2969 return; 2970 2971 rtw89_write32_mask(rtwdev, 2972 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 2973 B_AX_RX_FLTR_CFG_MASK, 2974 rtwdev->hal.rx_fltr); 2975 2976 rtw89_core_scan_complete(rtwdev, vif, true); 2977 ieee80211_scan_completed(rtwdev->hw, &info); 2978 ieee80211_wake_queues(rtwdev->hw); 2979 2980 rtw89_release_pkt_list(rtwdev); 2981 rtwvif = (struct rtw89_vif *)vif->drv_priv; 2982 rtwvif->scan_req = NULL; 2983 rtwvif->scan_ies = NULL; 2984 rtwdev->scan_info.last_chan_idx = 0; 2985 rtwdev->scan_info.scanning_vif = NULL; 2986 2987 if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK) 2988 rtw89_store_op_chan(rtwdev, false); 2989 rtw89_set_channel(rtwdev); 2990 } 2991 2992 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 2993 { 2994 rtw89_hw_scan_offload(rtwdev, vif, false); 2995 rtw89_hw_scan_complete(rtwdev, vif, true); 2996 } 2997 2998 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2999 bool enable) 3000 { 3001 struct rtw89_scan_option opt = {0}; 3002 struct rtw89_vif *rtwvif; 3003 int ret = 0; 3004 3005 rtwvif = vif ? 
(struct rtw89_vif *)vif->drv_priv : NULL; 3006 if (!rtwvif) 3007 return -EINVAL; 3008 3009 opt.enable = enable; 3010 opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; 3011 if (enable) { 3012 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif); 3013 if (ret) 3014 goto out; 3015 } 3016 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 3017 out: 3018 return ret; 3019 } 3020 3021 void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup) 3022 { 3023 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3024 const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3025 struct rtw89_chan new; 3026 3027 if (backup) { 3028 scan_info->op_pri_ch = cur->primary_channel; 3029 scan_info->op_chan = cur->channel; 3030 scan_info->op_bw = cur->band_width; 3031 scan_info->op_band = cur->band_type; 3032 } else { 3033 rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch, 3034 scan_info->op_band, scan_info->op_bw); 3035 rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new); 3036 } 3037 } 3038 3039 #define H2C_FW_CPU_EXCEPTION_LEN 4 3040 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 3041 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 3042 { 3043 struct sk_buff *skb; 3044 int ret; 3045 3046 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 3047 if (!skb) { 3048 rtw89_err(rtwdev, 3049 "failed to alloc skb for fw cpu exception\n"); 3050 return -ENOMEM; 3051 } 3052 3053 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 3054 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 3055 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 3056 3057 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3058 H2C_CAT_TEST, 3059 H2C_CL_FW_STATUS_TEST, 3060 H2C_FUNC_CPU_EXCEPTION, 0, 0, 3061 H2C_FW_CPU_EXCEPTION_LEN); 3062 3063 ret = rtw89_h2c_tx(rtwdev, skb, false); 3064 if (ret) { 3065 rtw89_err(rtwdev, "failed to send h2c\n"); 3066 goto fail; 3067 } 3068 3069 return 0; 3070 3071 fail: 3072 dev_kfree_skb_any(skb); 3073 return ret; 3074 } 3075 3076 #define H2C_PKT_DROP_LEN 24 3077 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 3078 const struct rtw89_pkt_drop_params *params) 3079 { 3080 struct sk_buff *skb; 3081 int ret; 3082 3083 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 3084 if (!skb) { 3085 rtw89_err(rtwdev, 3086 "failed to alloc skb for packet drop\n"); 3087 return -ENOMEM; 3088 } 3089 3090 switch (params->sel) { 3091 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 3092 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 3093 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 3094 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 3095 case RTW89_PKT_DROP_SEL_BAND_ONCE: 3096 break; 3097 default: 3098 rtw89_debug(rtwdev, RTW89_DBG_FW, 3099 "H2C of pkt drop might not fully support sel: %d yet\n", 3100 params->sel); 3101 break; 3102 } 3103 3104 skb_put(skb, H2C_PKT_DROP_LEN); 3105 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 3106 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 3107 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 3108 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 3109 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 3110 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 3111 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 3112 params->macid_band_sel[0]); 3113 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 3114 params->macid_band_sel[1]); 3115 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 3116 params->macid_band_sel[2]); 3117 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 
3118 params->macid_band_sel[3]);
3119
3120 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3121 H2C_CAT_MAC,
3122 H2C_CL_MAC_FW_OFLD,
3123 H2C_FUNC_PKT_DROP, 0, 0,
3124 H2C_PKT_DROP_LEN);
3125
3126 ret = rtw89_h2c_tx(rtwdev, skb, false);
3127 if (ret) {
3128 rtw89_err(rtwdev, "failed to send h2c\n");
3129 goto fail;
3130 }
3131
3132 return 0;
3133
3134 fail:
3135 dev_kfree_skb_any(skb);
3136 return ret;
3137 }
3138
3139 #define H2C_KEEP_ALIVE_LEN 4
3140 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3141 bool enable)
3142 {
3143 struct sk_buff *skb;
3144 u8 pkt_id = 0;
3145 int ret;
3146
3147 if (enable) {
3148 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
3149 RTW89_PKT_OFLD_TYPE_NULL_DATA,
3150 &pkt_id);
3151 if (ret)
3152 return -EPERM;
3153 }
3154
3155 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
3156 if (!skb) {
3157 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
3158 return -ENOMEM;
3159 }
3160
3161 skb_put(skb, H2C_KEEP_ALIVE_LEN);
3162
3163 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
3164 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
3165 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
3166 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
3167
3168 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3169 H2C_CAT_MAC,
3170 H2C_CL_MAC_WOW,
3171 H2C_FUNC_KEEP_ALIVE, 0, 1,
3172 H2C_KEEP_ALIVE_LEN);
3173
3174 ret = rtw89_h2c_tx(rtwdev, skb, false);
3175 if (ret) {
3176 rtw89_err(rtwdev, "failed to send h2c\n");
3177 goto fail;
3178 }
3179
3180 return 0;
3181
3182 fail:
3183 dev_kfree_skb_any(skb);
3184
3185 return ret;
3186 }
3187
3188 #define H2C_DISCONNECT_DETECT_LEN 8
3189 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
3190 struct rtw89_vif *rtwvif, bool enable)
3191 {
3192 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
3193 struct sk_buff *skb;
3194 u8 macid = rtwvif->mac_id;
3195 int ret;
3196
3197 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
3198 if (!skb) {
3199 rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
3200 return -ENOMEM;
3201 }
3202
3203 skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
3204
3205 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
3206 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
3207 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
3208 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
3209 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
3210 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
3211 }
3212
3213 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3214 H2C_CAT_MAC,
3215 H2C_CL_MAC_WOW,
3216 H2C_FUNC_DISCONNECT_DETECT, 0, 1,
3217 H2C_DISCONNECT_DETECT_LEN);
3218
3219 ret = rtw89_h2c_tx(rtwdev, skb, false);
3220 if (ret) {
3221 rtw89_err(rtwdev, "failed to send h2c\n");
3222 goto fail;
3223 }
3224
3225 return 0;
3226
3227 fail:
3228 dev_kfree_skb_any(skb);
3229
3230 return ret;
3231 }
3232
3233 #define H2C_WOW_GLOBAL_LEN 8
3234 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3235 bool enable)
3236 {
3237 struct sk_buff *skb;
3238 u8 macid = rtwvif->mac_id;
3239 int ret;
3240
3241 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
3242 if (!skb) {
3243 rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
3244 return -ENOMEM;
3245 }
3246
3247 skb_put(skb, H2C_WOW_GLOBAL_LEN);
3248
3249 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
3250 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
3251
3252
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3253 H2C_CAT_MAC,
3254 H2C_CL_MAC_WOW,
3255 H2C_FUNC_WOW_GLOBAL, 0, 1,
3256 H2C_WOW_GLOBAL_LEN);
3257
3258 ret = rtw89_h2c_tx(rtwdev, skb, false);
3259 if (ret) {
3260 rtw89_err(rtwdev, "failed to send h2c\n");
3261 goto fail;
3262 }
3263
3264 return 0;
3265
3266 fail:
3267 dev_kfree_skb_any(skb);
3268
3269 return ret;
3270 }
3271
3272 #define H2C_WAKEUP_CTRL_LEN 4
3273 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
3274 struct rtw89_vif *rtwvif,
3275 bool enable)
3276 {
3277 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
3278 struct sk_buff *skb;
3279 u8 macid = rtwvif->mac_id;
3280 int ret;
3281
3282 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
3283 if (!skb) {
3284 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
3285 return -ENOMEM;
3286 }
3287
3288 skb_put(skb, H2C_WAKEUP_CTRL_LEN);
3289
3290 if (rtw_wow->pattern_cnt)
3291 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
3292 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
3293 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
3294 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
3295 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
3296
3297 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
3298
3299 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3300 H2C_CAT_MAC,
3301 H2C_CL_MAC_WOW,
3302 H2C_FUNC_WAKEUP_CTRL, 0, 1,
3303 H2C_WAKEUP_CTRL_LEN);
3304
3305 ret = rtw89_h2c_tx(rtwdev, skb, false);
3306 if (ret) {
3307 rtw89_err(rtwdev, "failed to send h2c\n");
3308 goto fail;
3309 }
3310
3311 return 0;
3312
3313 fail:
3314 dev_kfree_skb_any(skb);
3315
3316 return ret;
3317 }
3318
3319 #define H2C_WOW_CAM_UPD_LEN 24
3320 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
3321 struct rtw89_wow_cam_info *cam_info)
3322 {
3323 struct sk_buff *skb;
3324 int ret;
3325
3326 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
3327 if (!skb) {
3328 rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
3329 return -ENOMEM;
3330 }
3331
3332 skb_put(skb, H2C_WOW_CAM_UPD_LEN);
3333
3334 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
3335 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
3336 if (cam_info->valid) {
3337 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
3338 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
3339 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
3340 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
3341 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
3342 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
3343 cam_info->negative_pattern_match);
3344 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
3345 cam_info->skip_mac_hdr);
3346 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
3347 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
3348 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
3349 }
3350 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
3351
3352 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3353 H2C_CAT_MAC,
3354 H2C_CL_MAC_WOW,
3355 H2C_FUNC_WOW_CAM_UPD, 0, 1,
3356 H2C_WOW_CAM_UPD_LEN);
3357
3358 ret = rtw89_h2c_tx(rtwdev, skb, false);
3359 if (ret) {
3360 rtw89_err(rtwdev, "failed to send h2c\n");
3361 goto fail;
3362 }
3363
3364 return 0;
3365 fail:
3366 dev_kfree_skb_any(skb);
3367
3368 return ret;
3369 }
3370
3371 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
3372 struct rtw89_wait_info *wait, unsigned int cond) {
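/* Send-and-wait helper for the MCC H2Cs below: the skb is consumed here
 * (freed on a send failure), and the caller then blocks in
 * rtw89_wait_for_cond() until the completion matching 'cond' is signalled,
 * presumably from the corresponding MCC C2H report, or the wait times out.
 */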
3374 int ret; 3375 3376 ret = rtw89_h2c_tx(rtwdev, skb, false); 3377 if (ret) { 3378 rtw89_err(rtwdev, "failed to send h2c\n"); 3379 dev_kfree_skb_any(skb); 3380 return -EBUSY; 3381 } 3382 3383 return rtw89_wait_for_cond(wait, cond); 3384 } 3385 3386 #define H2C_ADD_MCC_LEN 16 3387 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 3388 const struct rtw89_fw_mcc_add_req *p) 3389 { 3390 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3391 struct sk_buff *skb; 3392 unsigned int cond; 3393 3394 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 3395 if (!skb) { 3396 rtw89_err(rtwdev, 3397 "failed to alloc skb for add mcc\n"); 3398 return -ENOMEM; 3399 } 3400 3401 skb_put(skb, H2C_ADD_MCC_LEN); 3402 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 3403 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 3404 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 3405 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 3406 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 3407 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 3408 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 3409 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 3410 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 3411 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 3412 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 3413 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 3414 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 3415 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 3416 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 3417 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 3418 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 3419 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 3420 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 3421 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 3422 3423 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3424 H2C_CAT_MAC, 3425 H2C_CL_MCC, 3426 H2C_FUNC_ADD_MCC, 0, 0, 3427 H2C_ADD_MCC_LEN); 3428 3429 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 3430 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3431 } 3432 3433 #define H2C_START_MCC_LEN 12 3434 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 3435 const struct rtw89_fw_mcc_start_req *p) 3436 { 3437 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3438 struct sk_buff *skb; 3439 unsigned int cond; 3440 3441 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 3442 if (!skb) { 3443 rtw89_err(rtwdev, 3444 "failed to alloc skb for start mcc\n"); 3445 return -ENOMEM; 3446 } 3447 3448 skb_put(skb, H2C_START_MCC_LEN); 3449 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 3450 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 3451 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 3452 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 3453 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 3454 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 3455 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 3456 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 3457 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 3458 3459 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3460 H2C_CAT_MAC, 3461 H2C_CL_MCC, 
3462 H2C_FUNC_START_MCC, 0, 0, 3463 H2C_START_MCC_LEN); 3464 3465 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 3466 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3467 } 3468 3469 #define H2C_STOP_MCC_LEN 4 3470 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 3471 bool prev_groups) 3472 { 3473 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3474 struct sk_buff *skb; 3475 unsigned int cond; 3476 3477 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 3478 if (!skb) { 3479 rtw89_err(rtwdev, 3480 "failed to alloc skb for stop mcc\n"); 3481 return -ENOMEM; 3482 } 3483 3484 skb_put(skb, H2C_STOP_MCC_LEN); 3485 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 3486 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 3487 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 3488 3489 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3490 H2C_CAT_MAC, 3491 H2C_CL_MCC, 3492 H2C_FUNC_STOP_MCC, 0, 0, 3493 H2C_STOP_MCC_LEN); 3494 3495 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 3496 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3497 } 3498 3499 #define H2C_DEL_MCC_GROUP_LEN 4 3500 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 3501 bool prev_groups) 3502 { 3503 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3504 struct sk_buff *skb; 3505 unsigned int cond; 3506 3507 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 3508 if (!skb) { 3509 rtw89_err(rtwdev, 3510 "failed to alloc skb for del mcc group\n"); 3511 return -ENOMEM; 3512 } 3513 3514 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 3515 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 3516 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 3517 3518 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3519 H2C_CAT_MAC, 3520 H2C_CL_MCC, 3521 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 3522 H2C_DEL_MCC_GROUP_LEN); 3523 3524 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 3525 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3526 } 3527 3528 #define H2C_RESET_MCC_GROUP_LEN 4 3529 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 3530 { 3531 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3532 struct sk_buff *skb; 3533 unsigned int cond; 3534 3535 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 3536 if (!skb) { 3537 rtw89_err(rtwdev, 3538 "failed to alloc skb for reset mcc group\n"); 3539 return -ENOMEM; 3540 } 3541 3542 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 3543 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 3544 3545 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3546 H2C_CAT_MAC, 3547 H2C_CL_MCC, 3548 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 3549 H2C_RESET_MCC_GROUP_LEN); 3550 3551 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 3552 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3553 } 3554 3555 #define H2C_MCC_REQ_TSF_LEN 4 3556 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 3557 const struct rtw89_fw_mcc_tsf_req *req, 3558 struct rtw89_mac_mcc_tsf_rpt *rpt) 3559 { 3560 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3561 struct rtw89_mac_mcc_tsf_rpt *tmp; 3562 struct sk_buff *skb; 3563 unsigned int cond; 3564 int ret; 3565 3566 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 3567 if (!skb) { 3568 rtw89_err(rtwdev, 3569 "failed to alloc skb for mcc req tsf\n"); 3570 return -ENOMEM; 3571 } 3572 3573 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 3574 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 3575 
RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 3576 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 3577 3578 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3579 H2C_CAT_MAC, 3580 H2C_CL_MCC, 3581 H2C_FUNC_MCC_REQ_TSF, 0, 0, 3582 H2C_MCC_REQ_TSF_LEN); 3583 3584 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 3585 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3586 if (ret) 3587 return ret; 3588 3589 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 3590 *rpt = *tmp; 3591 3592 return 0; 3593 } 3594 3595 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 3596 int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid, 3597 u8 *bitmap) 3598 { 3599 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3600 struct sk_buff *skb; 3601 unsigned int cond; 3602 u8 map_len; 3603 u8 h2c_len; 3604 3605 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 3606 map_len = RTW89_MAX_MAC_ID_NUM / 8; 3607 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 3608 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 3609 if (!skb) { 3610 rtw89_err(rtwdev, 3611 "failed to alloc skb for mcc macid bitmap\n"); 3612 return -ENOMEM; 3613 } 3614 3615 skb_put(skb, h2c_len); 3616 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 3617 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 3618 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 3619 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 3620 3621 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3622 H2C_CAT_MAC, 3623 H2C_CL_MCC, 3624 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 3625 h2c_len); 3626 3627 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 3628 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3629 } 3630 3631 #define H2C_MCC_SYNC_LEN 4 3632 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 3633 u8 target, u8 offset) 3634 { 3635 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3636 struct sk_buff *skb; 3637 unsigned int cond; 3638 3639 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 3640 if (!skb) { 3641 rtw89_err(rtwdev, 3642 "failed to alloc skb for mcc sync\n"); 3643 return -ENOMEM; 3644 } 3645 3646 skb_put(skb, H2C_MCC_SYNC_LEN); 3647 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 3648 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 3649 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 3650 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 3651 3652 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3653 H2C_CAT_MAC, 3654 H2C_CL_MCC, 3655 H2C_FUNC_MCC_SYNC, 0, 0, 3656 H2C_MCC_SYNC_LEN); 3657 3658 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 3659 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3660 } 3661 3662 #define H2C_MCC_SET_DURATION_LEN 20 3663 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 3664 const struct rtw89_fw_mcc_duration *p) 3665 { 3666 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3667 struct sk_buff *skb; 3668 unsigned int cond; 3669 3670 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 3671 if (!skb) { 3672 rtw89_err(rtwdev, 3673 "failed to alloc skb for mcc set duration\n"); 3674 return -ENOMEM; 3675 } 3676 3677 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 3678 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 3679 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 3680 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 3681 
RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 3682 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 3683 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 3684 p->start_tsf_low); 3685 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 3686 p->start_tsf_high); 3687 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 3688 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 3689 3690 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3691 H2C_CAT_MAC, 3692 H2C_CL_MCC, 3693 H2C_FUNC_MCC_SET_DURATION, 0, 0, 3694 H2C_MCC_SET_DURATION_LEN); 3695 3696 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 3697 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3698 } 3699
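/*
 * Illustrative sketch only (not part of the driver): how the MCC H2Cs above
 * are meant to compose with the RTW89_MCC_WAIT_COND() completion scheme.
 * The helper name and all field values below are hypothetical placeholders.
 */
#if 0
static int rtw89_mcc_h2c_example(struct rtw89_dev *rtwdev, u8 group, u8 macid)
{
	struct rtw89_fw_mcc_add_req add_req = {
		.group = group,
		.macid = macid,
		.primary_ch = 1,	/* hypothetical channel plan */
		.central_ch_seg0 = 1,
		.duration = 40,		/* hypothetical slot duration */
	};
	struct rtw89_fw_mcc_start_req start_req = {
		.group = group,
		.macid = macid,
	};
	int ret;

	/* Each call blocks until the matching RTW89_MCC_WAIT_COND(group, func)
	 * completion arrives or the wait times out.
	 */
	ret = rtw89_fw_h2c_add_mcc(rtwdev, &add_req);
	if (ret)
		return ret;

	ret = rtw89_fw_h2c_start_mcc(rtwdev, &start_req);
	if (ret)
		return ret;

	return rtw89_fw_h2c_stop_mcc(rtwdev, group, macid, true);
}
#endif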