1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "chan.h" 7 #include "coex.h" 8 #include "debug.h" 9 #include "fw.h" 10 #include "mac.h" 11 #include "phy.h" 12 #include "reg.h" 13 #include "util.h" 14 15 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 16 struct sk_buff *skb); 17 18 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 19 bool header) 20 { 21 struct sk_buff *skb; 22 u32 header_len = 0; 23 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 24 25 if (header) 26 header_len = H2C_HEADER_LEN; 27 28 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 29 if (!skb) 30 return NULL; 31 skb_reserve(skb, header_len + h2c_desc_size); 32 memset(skb->data, 0, len); 33 34 return skb; 35 } 36 37 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 38 { 39 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 40 } 41 42 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 43 { 44 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 45 } 46 47 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) 48 { 49 u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); 50 51 return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); 52 } 53 54 #define FWDL_WAIT_CNT 400000 55 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) 56 { 57 u8 val; 58 int ret; 59 60 ret = read_poll_timeout_atomic(_fw_get_rdy, val, 61 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 62 1, FWDL_WAIT_CNT, false, rtwdev); 63 if (ret) { 64 switch (val) { 65 case RTW89_FWDL_CHECKSUM_FAIL: 66 rtw89_err(rtwdev, "fw checksum fail\n"); 67 return -EINVAL; 68 69 case RTW89_FWDL_SECURITY_FAIL: 70 rtw89_err(rtwdev, "fw security fail\n"); 71 return -EINVAL; 72 73 case RTW89_FWDL_CV_NOT_MATCH: 74 rtw89_err(rtwdev, "fw cv not match\n"); 75 return -EINVAL; 76 77 default: 78 return -EBUSY; 79 } 80 } 81 82 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 83 84 return 0; 85 } 86 87 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 88 struct rtw89_fw_bin_info *info) 89 { 90 struct rtw89_fw_hdr_section_info *section_info; 91 const u8 *fw_end = fw + len; 92 const u8 *fwdynhdr; 93 const u8 *bin; 94 u32 base_hdr_len; 95 u32 mssc_len = 0; 96 u32 i; 97 98 if (!info) 99 return -EINVAL; 100 101 info->section_num = GET_FW_HDR_SEC_NUM(fw); 102 base_hdr_len = RTW89_FW_HDR_SIZE + 103 info->section_num * RTW89_FW_SECTION_HDR_SIZE; 104 info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw); 105 106 if (info->dynamic_hdr_en) { 107 info->hdr_len = GET_FW_HDR_LEN(fw); 108 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 109 fwdynhdr = fw + base_hdr_len; 110 if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) { 111 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 112 return -EINVAL; 113 } 114 } else { 115 info->hdr_len = base_hdr_len; 116 info->dynamic_hdr_len = 0; 117 } 118 119 bin = fw + info->hdr_len; 120 121 /* jump to section header */ 122 fw += RTW89_FW_HDR_SIZE; 123 section_info = info->section_info; 124 for (i = 0; i < info->section_num; i++) { 125 section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw); 126 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 127 section_info->mssc = GET_FWSECTION_HDR_MSSC(fw); 128 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN; 129 } else { 130 section_info->mssc = 0; 131 } 132 133 section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw); 134 if (GET_FWSECTION_HDR_CHECKSUM(fw)) 135 section_info->len += FWDL_SECTION_CHKSUM_LEN; 136 
section_info->redl = GET_FWSECTION_HDR_REDL(fw); 137 section_info->dladdr = 138 GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff; 139 section_info->addr = bin; 140 bin += section_info->len; 141 fw += RTW89_FW_SECTION_HDR_SIZE; 142 section_info++; 143 } 144 145 if (fw_end != bin + mssc_len) { 146 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 147 return -EINVAL; 148 } 149 150 return 0; 151 } 152 153 static 154 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 155 struct rtw89_fw_suit *fw_suit, bool nowarn) 156 { 157 struct rtw89_fw_info *fw_info = &rtwdev->fw; 158 const struct firmware *firmware = fw_info->req.firmware; 159 const u8 *mfw = firmware->data; 160 u32 mfw_len = firmware->size; 161 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 162 const struct rtw89_mfw_info *mfw_info; 163 int i; 164 165 if (mfw_hdr->sig != RTW89_MFW_SIG) { 166 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 167 /* legacy firmware support normal type only */ 168 if (type != RTW89_FW_NORMAL) 169 return -EINVAL; 170 fw_suit->data = mfw; 171 fw_suit->size = mfw_len; 172 return 0; 173 } 174 175 for (i = 0; i < mfw_hdr->fw_nr; i++) { 176 mfw_info = &mfw_hdr->info[i]; 177 if (mfw_info->cv != rtwdev->hal.cv || 178 mfw_info->type != type || 179 mfw_info->mp) 180 continue; 181 182 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 183 fw_suit->size = le32_to_cpu(mfw_info->size); 184 return 0; 185 } 186 187 if (!nowarn) 188 rtw89_err(rtwdev, "no suitable firmware found\n"); 189 return -ENOENT; 190 } 191 192 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 193 enum rtw89_fw_type type, 194 struct rtw89_fw_suit *fw_suit) 195 { 196 const u8 *hdr = fw_suit->data; 197 198 fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr); 199 fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr); 200 fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr); 201 fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr); 202 fw_suit->build_year = GET_FW_HDR_YEAR(hdr); 203 fw_suit->build_mon = GET_FW_HDR_MONTH(hdr); 204 fw_suit->build_date = GET_FW_HDR_DATE(hdr); 205 fw_suit->build_hour = GET_FW_HDR_HOUR(hdr); 206 fw_suit->build_min = GET_FW_HDR_MIN(hdr); 207 fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr); 208 209 rtw89_info(rtwdev, 210 "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n", 211 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 212 fw_suit->sub_idex, fw_suit->cmd_ver, type); 213 } 214 215 static 216 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 217 bool nowarn) 218 { 219 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 220 int ret; 221 222 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 223 if (ret) 224 return ret; 225 226 rtw89_fw_update_ver(rtwdev, type, fw_suit); 227 228 return 0; 229 } 230 231 #define __DEF_FW_FEAT_COND(__cond, __op) \ 232 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 233 { \ 234 return suit_ver_code __op comp_ver_code; \ 235 } 236 237 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 238 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 239 __DEF_FW_FEAT_COND(lt, <); /* less than */ 240 241 struct __fw_feat_cfg { 242 enum rtw89_core_chip_id chip_id; 243 enum rtw89_fw_feature feature; 244 u32 ver_code; 245 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 246 }; 247 248 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 249 { \ 250 .chip_id = _chip, \ 251 .feature = RTW89_FW_FEATURE_ ## _feat, \ 252 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 
253 .cond = __fw_feat_cond_ ## _cond, \ 254 } 255 256 static const struct __fw_feat_cfg fw_feat_tbl[] = { 257 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 258 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 259 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 260 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 261 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 262 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 263 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 264 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 265 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 266 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 267 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 268 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 269 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 270 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 271 }; 272 273 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 274 const struct rtw89_chip_info *chip, 275 u32 ver_code) 276 { 277 int i; 278 279 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 280 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 281 282 if (chip->chip_id != ent->chip_id) 283 continue; 284 285 if (ent->cond(ver_code, ent->ver_code)) 286 RTW89_SET_FW_FEATURE(ent->feature, fw); 287 } 288 } 289 290 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 291 { 292 const struct rtw89_chip_info *chip = rtwdev->chip; 293 const struct rtw89_fw_suit *fw_suit; 294 u32 suit_ver_code; 295 296 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 297 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 298 299 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 300 } 301 302 const struct firmware * 303 rtw89_early_fw_feature_recognize(struct device *device, 304 const struct rtw89_chip_info *chip, 305 struct rtw89_fw_info *early_fw, 306 int *used_fw_format) 307 { 308 union rtw89_compat_fw_hdr buf = {}; 309 const struct firmware *firmware; 310 bool full_req = false; 311 char fw_name[64]; 312 int fw_format; 313 u32 ver_code; 314 int ret; 315 316 /* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will 317 * be denied (-EPERM). Then, we don't get right firmware things as 318 * expected. So, in this case, we have to request full firmware here. 
319 */ 320 if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE)) 321 full_req = true; 322 323 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 324 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 325 chip->fw_basename, fw_format); 326 327 if (full_req) 328 ret = request_firmware(&firmware, fw_name, device); 329 else 330 ret = request_partial_firmware_into_buf(&firmware, fw_name, 331 device, &buf, sizeof(buf), 332 0); 333 if (!ret) { 334 dev_info(device, "loaded firmware %s\n", fw_name); 335 *used_fw_format = fw_format; 336 break; 337 } 338 } 339 340 if (ret) { 341 dev_err(device, "failed to early request firmware: %d\n", ret); 342 return NULL; 343 } 344 345 if (full_req) 346 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 347 else 348 ver_code = rtw89_compat_fw_hdr_ver_code(&buf); 349 350 if (!ver_code) 351 goto out; 352 353 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 354 355 out: 356 if (full_req) 357 return firmware; 358 359 release_firmware(firmware); 360 return NULL; 361 } 362 363 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 364 { 365 const struct rtw89_chip_info *chip = rtwdev->chip; 366 int ret; 367 368 if (chip->try_ce_fw) { 369 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 370 if (!ret) 371 goto normal_done; 372 } 373 374 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 375 if (ret) 376 return ret; 377 378 normal_done: 379 /* It still works if wowlan firmware isn't existing. */ 380 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 381 382 rtw89_fw_recognize_features(rtwdev); 383 384 rtw89_coex_recognize_ver(rtwdev); 385 386 return 0; 387 } 388 389 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 390 u8 type, u8 cat, u8 class, u8 func, 391 bool rack, bool dack, u32 len) 392 { 393 struct fwcmd_hdr *hdr; 394 395 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 396 397 if (!(rtwdev->fw.h2c_seq % 4)) 398 rack = true; 399 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 400 FIELD_PREP(H2C_HDR_CAT, cat) | 401 FIELD_PREP(H2C_HDR_CLASS, class) | 402 FIELD_PREP(H2C_HDR_FUNC, func) | 403 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 404 405 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 406 len + H2C_HEADER_LEN) | 407 (rack ? H2C_HDR_REC_ACK : 0) | 408 (dack ? 
H2C_HDR_DONE_ACK : 0)); 409 410 rtwdev->fw.h2c_seq++; 411 } 412 413 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 414 struct sk_buff *skb, 415 u8 type, u8 cat, u8 class, u8 func, 416 u32 len) 417 { 418 struct fwcmd_hdr *hdr; 419 420 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 421 422 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 423 FIELD_PREP(H2C_HDR_CAT, cat) | 424 FIELD_PREP(H2C_HDR_CLASS, class) | 425 FIELD_PREP(H2C_HDR_FUNC, func) | 426 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 427 428 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 429 len + H2C_HEADER_LEN)); 430 } 431 432 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 433 { 434 struct sk_buff *skb; 435 u32 ret = 0; 436 437 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 438 if (!skb) { 439 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 440 return -ENOMEM; 441 } 442 443 skb_put_data(skb, fw, len); 444 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); 445 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 446 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 447 H2C_FUNC_MAC_FWHDR_DL, len); 448 449 ret = rtw89_h2c_tx(rtwdev, skb, false); 450 if (ret) { 451 rtw89_err(rtwdev, "failed to send h2c\n"); 452 ret = -1; 453 goto fail; 454 } 455 456 return 0; 457 fail: 458 dev_kfree_skb_any(skb); 459 460 return ret; 461 } 462 463 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 464 { 465 u8 val; 466 int ret; 467 468 ret = __rtw89_fw_download_hdr(rtwdev, fw, len); 469 if (ret) { 470 rtw89_err(rtwdev, "[ERR]FW header download\n"); 471 return ret; 472 } 473 474 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, 475 1, FWDL_WAIT_CNT, false, 476 rtwdev, R_AX_WCPU_FW_CTRL); 477 if (ret) { 478 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 479 return ret; 480 } 481 482 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 483 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 484 485 return 0; 486 } 487 488 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 489 struct rtw89_fw_hdr_section_info *info) 490 { 491 struct sk_buff *skb; 492 const u8 *section = info->addr; 493 u32 residue_len = info->len; 494 u32 pkt_len; 495 int ret; 496 497 while (residue_len) { 498 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 499 pkt_len = FWDL_SECTION_PER_PKT_LEN; 500 else 501 pkt_len = residue_len; 502 503 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 504 if (!skb) { 505 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 506 return -ENOMEM; 507 } 508 skb_put_data(skb, section, pkt_len); 509 510 ret = rtw89_h2c_tx(rtwdev, skb, true); 511 if (ret) { 512 rtw89_err(rtwdev, "failed to send h2c\n"); 513 ret = -1; 514 goto fail; 515 } 516 517 section += pkt_len; 518 residue_len -= pkt_len; 519 } 520 521 return 0; 522 fail: 523 dev_kfree_skb_any(skb); 524 525 return ret; 526 } 527 528 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, 529 struct rtw89_fw_bin_info *info) 530 { 531 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 532 u8 section_num = info->section_num; 533 int ret; 534 535 while (section_num--) { 536 ret = __rtw89_fw_download_main(rtwdev, section_info); 537 if (ret) 538 return ret; 539 section_info++; 540 } 541 542 mdelay(5); 543 544 ret = rtw89_fw_check_rdy(rtwdev); 545 if (ret) { 546 rtw89_warn(rtwdev, "download firmware fail\n"); 547 return ret; 548 } 549 550 return 0; 551 } 552 553 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 554 { 555 u32 
val32; 556 u16 index; 557 558 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 559 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 560 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 561 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 562 563 for (index = 0; index < 15; index++) { 564 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL); 565 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 566 fsleep(10); 567 } 568 } 569 570 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 571 { 572 u32 val32; 573 u16 val16; 574 575 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 576 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 577 578 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); 579 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); 580 581 rtw89_fw_prog_cnt_dump(rtwdev); 582 } 583 584 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 585 { 586 struct rtw89_fw_info *fw_info = &rtwdev->fw; 587 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 588 struct rtw89_fw_bin_info info; 589 const u8 *fw = fw_suit->data; 590 u32 len = fw_suit->size; 591 u8 val; 592 int ret; 593 594 rtw89_mac_disable_cpu(rtwdev); 595 ret = rtw89_mac_enable_cpu(rtwdev, 0, true); 596 if (ret) 597 return ret; 598 599 if (!fw || !len) { 600 rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); 601 return -ENOENT; 602 } 603 604 ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); 605 if (ret) { 606 rtw89_err(rtwdev, "parse fw header fail\n"); 607 goto fwdl_err; 608 } 609 610 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, 611 1, FWDL_WAIT_CNT, false, 612 rtwdev, R_AX_WCPU_FW_CTRL); 613 if (ret) { 614 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 615 goto fwdl_err; 616 } 617 618 ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len); 619 if (ret) { 620 ret = -EBUSY; 621 goto fwdl_err; 622 } 623 624 ret = rtw89_fw_download_main(rtwdev, fw, &info); 625 if (ret) { 626 ret = -EBUSY; 627 goto fwdl_err; 628 } 629 630 fw_info->h2c_seq = 0; 631 fw_info->rec_seq = 0; 632 fw_info->h2c_counter = 0; 633 fw_info->c2h_counter = 0; 634 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 635 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 636 637 return ret; 638 639 fwdl_err: 640 rtw89_fw_dl_fail_dump(rtwdev); 641 return ret; 642 } 643 644 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 645 { 646 struct rtw89_fw_info *fw = &rtwdev->fw; 647 648 wait_for_completion(&fw->req.completion); 649 if (!fw->req.firmware) 650 return -EINVAL; 651 652 return 0; 653 } 654 655 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 656 struct rtw89_fw_req_info *req, 657 const char *fw_name, bool nowarn) 658 { 659 int ret; 660 661 if (req->firmware) { 662 rtw89_debug(rtwdev, RTW89_DBG_FW, 663 "full firmware has been early requested\n"); 664 complete_all(&req->completion); 665 return 0; 666 } 667 668 if (nowarn) 669 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 670 else 671 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 672 673 complete_all(&req->completion); 674 675 return ret; 676 } 677 678 void rtw89_load_firmware_work(struct work_struct *work) 679 { 680 struct rtw89_dev *rtwdev = 681 container_of(work, struct rtw89_dev, load_firmware_work); 682 const struct rtw89_chip_info *chip = rtwdev->chip; 683 char fw_name[64]; 684 685 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 686 chip->fw_basename, rtwdev->fw.fw_format); 687 688 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 689 
} 690 691 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 692 { 693 struct rtw89_fw_info *fw = &rtwdev->fw; 694 695 cancel_work_sync(&rtwdev->load_firmware_work); 696 697 if (fw->req.firmware) { 698 release_firmware(fw->req.firmware); 699 700 /* assign NULL back in case rtw89_free_ieee80211_hw() 701 * try to release the same one again. 702 */ 703 fw->req.firmware = NULL; 704 } 705 } 706 707 #define H2C_CAM_LEN 60 708 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 709 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 710 { 711 struct sk_buff *skb; 712 int ret; 713 714 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 715 if (!skb) { 716 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 717 return -ENOMEM; 718 } 719 skb_put(skb, H2C_CAM_LEN); 720 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 721 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data); 722 723 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 724 H2C_CAT_MAC, 725 H2C_CL_MAC_ADDR_CAM_UPDATE, 726 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 727 H2C_CAM_LEN); 728 729 ret = rtw89_h2c_tx(rtwdev, skb, false); 730 if (ret) { 731 rtw89_err(rtwdev, "failed to send h2c\n"); 732 goto fail; 733 } 734 735 return 0; 736 fail: 737 dev_kfree_skb_any(skb); 738 739 return ret; 740 } 741 742 #define H2C_DCTL_SEC_CAM_LEN 68 743 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 744 struct rtw89_vif *rtwvif, 745 struct rtw89_sta *rtwsta) 746 { 747 struct sk_buff *skb; 748 int ret; 749 750 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); 751 if (!skb) { 752 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 753 return -ENOMEM; 754 } 755 skb_put(skb, H2C_DCTL_SEC_CAM_LEN); 756 757 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); 758 759 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 760 H2C_CAT_MAC, 761 H2C_CL_MAC_FR_EXCHG, 762 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 763 H2C_DCTL_SEC_CAM_LEN); 764 765 ret = rtw89_h2c_tx(rtwdev, skb, false); 766 if (ret) { 767 rtw89_err(rtwdev, "failed to send h2c\n"); 768 goto fail; 769 } 770 771 return 0; 772 fail: 773 dev_kfree_skb_any(skb); 774 775 return ret; 776 } 777 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 778 779 #define H2C_BA_CAM_LEN 8 780 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 781 bool valid, struct ieee80211_ampdu_params *params) 782 { 783 const struct rtw89_chip_info *chip = rtwdev->chip; 784 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 785 u8 macid = rtwsta->mac_id; 786 struct sk_buff *skb; 787 u8 entry_idx; 788 int ret; 789 790 ret = valid ? 791 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 792 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 793 if (ret) { 794 /* it still works even if we don't have static BA CAM, because 795 * hardware can create dynamic BA CAM automatically. 796 */ 797 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 798 "failed to %s entry tid=%d for h2c ba cam\n", 799 valid ? 
"alloc" : "free", params->tid); 800 return 0; 801 } 802 803 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 804 if (!skb) { 805 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 806 return -ENOMEM; 807 } 808 skb_put(skb, H2C_BA_CAM_LEN); 809 SET_BA_CAM_MACID(skb->data, macid); 810 if (chip->bacam_v1) 811 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 812 else 813 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx); 814 if (!valid) 815 goto end; 816 SET_BA_CAM_VALID(skb->data, valid); 817 SET_BA_CAM_TID(skb->data, params->tid); 818 if (params->buf_size > 64) 819 SET_BA_CAM_BMAP_SIZE(skb->data, 4); 820 else 821 SET_BA_CAM_BMAP_SIZE(skb->data, 0); 822 /* If init req is set, hw will set the ssn */ 823 SET_BA_CAM_INIT_REQ(skb->data, 1); 824 SET_BA_CAM_SSN(skb->data, params->ssn); 825 826 if (chip->bacam_v1) { 827 SET_BA_CAM_STD_EN(skb->data, 1); 828 SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx); 829 } 830 831 end: 832 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 833 H2C_CAT_MAC, 834 H2C_CL_BA_CAM, 835 H2C_FUNC_MAC_BA_CAM, 0, 1, 836 H2C_BA_CAM_LEN); 837 838 ret = rtw89_h2c_tx(rtwdev, skb, false); 839 if (ret) { 840 rtw89_err(rtwdev, "failed to send h2c\n"); 841 goto fail; 842 } 843 844 return 0; 845 fail: 846 dev_kfree_skb_any(skb); 847 848 return ret; 849 } 850 851 static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev, 852 u8 entry_idx, u8 uid) 853 { 854 struct sk_buff *skb; 855 int ret; 856 857 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 858 if (!skb) { 859 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 860 return -ENOMEM; 861 } 862 skb_put(skb, H2C_BA_CAM_LEN); 863 864 SET_BA_CAM_VALID(skb->data, 1); 865 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 866 SET_BA_CAM_UID(skb->data, uid); 867 SET_BA_CAM_BAND(skb->data, 0); 868 SET_BA_CAM_STD_EN(skb->data, 0); 869 870 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 871 H2C_CAT_MAC, 872 H2C_CL_BA_CAM, 873 H2C_FUNC_MAC_BA_CAM, 0, 1, 874 H2C_BA_CAM_LEN); 875 876 ret = rtw89_h2c_tx(rtwdev, skb, false); 877 if (ret) { 878 rtw89_err(rtwdev, "failed to send h2c\n"); 879 goto fail; 880 } 881 882 return 0; 883 fail: 884 dev_kfree_skb_any(skb); 885 886 return ret; 887 } 888 889 void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev) 890 { 891 const struct rtw89_chip_info *chip = rtwdev->chip; 892 u8 entry_idx = chip->bacam_num; 893 u8 uid = 0; 894 int i; 895 896 for (i = 0; i < chip->bacam_dynamic_num; i++) { 897 rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid); 898 entry_idx++; 899 uid++; 900 } 901 } 902 903 #define H2C_LOG_CFG_LEN 12 904 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 905 { 906 struct sk_buff *skb; 907 u32 comp = enable ? 
BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 908 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; 909 int ret; 910 911 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 912 if (!skb) { 913 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 914 return -ENOMEM; 915 } 916 917 skb_put(skb, H2C_LOG_CFG_LEN); 918 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); 919 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 920 SET_LOG_CFG_COMP(skb->data, comp); 921 SET_LOG_CFG_COMP_EXT(skb->data, 0); 922 923 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 924 H2C_CAT_MAC, 925 H2C_CL_FW_INFO, 926 H2C_FUNC_LOG_CFG, 0, 0, 927 H2C_LOG_CFG_LEN); 928 929 ret = rtw89_h2c_tx(rtwdev, skb, false); 930 if (ret) { 931 rtw89_err(rtwdev, "failed to send h2c\n"); 932 goto fail; 933 } 934 935 return 0; 936 fail: 937 dev_kfree_skb_any(skb); 938 939 return ret; 940 } 941 942 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 943 struct rtw89_vif *rtwvif, 944 enum rtw89_fw_pkt_ofld_type type, 945 u8 *id) 946 { 947 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 948 struct rtw89_pktofld_info *info; 949 struct sk_buff *skb; 950 int ret; 951 952 info = kzalloc(sizeof(*info), GFP_KERNEL); 953 if (!info) 954 return -ENOMEM; 955 956 switch (type) { 957 case RTW89_PKT_OFLD_TYPE_PS_POLL: 958 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 959 break; 960 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 961 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 962 break; 963 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 964 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 965 break; 966 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 967 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 968 break; 969 default: 970 goto err; 971 } 972 973 if (!skb) 974 goto err; 975 976 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 977 kfree_skb(skb); 978 979 if (ret) 980 goto err; 981 982 list_add_tail(&info->list, &rtwvif->general_pkt_list); 983 *id = info->id; 984 return 0; 985 986 err: 987 kfree(info); 988 return -ENOMEM; 989 } 990 991 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 992 struct rtw89_vif *rtwvif, bool notify_fw) 993 { 994 struct list_head *pkt_list = &rtwvif->general_pkt_list; 995 struct rtw89_pktofld_info *info, *tmp; 996 997 list_for_each_entry_safe(info, tmp, pkt_list, list) { 998 if (notify_fw) 999 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 1000 rtw89_core_release_bit_map(rtwdev->pkt_offload, 1001 info->id); 1002 list_del(&info->list); 1003 kfree(info); 1004 } 1005 } 1006 1007 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 1008 { 1009 struct rtw89_vif *rtwvif; 1010 1011 rtw89_for_each_rtwvif(rtwdev, rtwvif) 1012 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw); 1013 } 1014 1015 #define H2C_GENERAL_PKT_LEN 6 1016 #define H2C_GENERAL_PKT_ID_UND 0xff 1017 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 1018 struct rtw89_vif *rtwvif, u8 macid) 1019 { 1020 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 1021 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 1022 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 1023 struct sk_buff *skb; 1024 int ret; 1025 1026 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1027 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 1028 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1029 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 1030 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1031 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 1032 1033 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 1034 if (!skb) { 1035 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1036 return -ENOMEM; 1037 } 1038 skb_put(skb, H2C_GENERAL_PKT_LEN); 1039 SET_GENERAL_PKT_MACID(skb->data, macid); 1040 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 1041 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 1042 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 1043 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 1044 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 1045 1046 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1047 H2C_CAT_MAC, 1048 H2C_CL_FW_INFO, 1049 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 1050 H2C_GENERAL_PKT_LEN); 1051 1052 ret = rtw89_h2c_tx(rtwdev, skb, false); 1053 if (ret) { 1054 rtw89_err(rtwdev, "failed to send h2c\n"); 1055 goto fail; 1056 } 1057 1058 return 0; 1059 fail: 1060 dev_kfree_skb_any(skb); 1061 1062 return ret; 1063 } 1064 1065 #define H2C_LPS_PARM_LEN 8 1066 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 1067 struct rtw89_lps_parm *lps_param) 1068 { 1069 struct sk_buff *skb; 1070 int ret; 1071 1072 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 1073 if (!skb) { 1074 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1075 return -ENOMEM; 1076 } 1077 skb_put(skb, H2C_LPS_PARM_LEN); 1078 1079 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 1080 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 1081 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 1082 SET_LPS_PARM_RLBM(skb->data, 1); 1083 SET_LPS_PARM_SMARTPS(skb->data, 1); 1084 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 1085 SET_LPS_PARM_VOUAPSD(skb->data, 0); 1086 SET_LPS_PARM_VIUAPSD(skb->data, 0); 1087 SET_LPS_PARM_BEUAPSD(skb->data, 0); 1088 SET_LPS_PARM_BKUAPSD(skb->data, 0); 1089 1090 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1091 H2C_CAT_MAC, 1092 H2C_CL_MAC_PS, 1093 H2C_FUNC_MAC_LPS_PARM, 0, 1, 1094 H2C_LPS_PARM_LEN); 1095 1096 ret = rtw89_h2c_tx(rtwdev, skb, false); 1097 if (ret) { 1098 rtw89_err(rtwdev, "failed to send h2c\n"); 1099 goto fail; 1100 } 1101 1102 return 0; 1103 fail: 1104 dev_kfree_skb_any(skb); 1105 1106 return ret; 1107 } 1108 1109 #define H2C_P2P_ACT_LEN 20 1110 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 1111 struct ieee80211_p2p_noa_desc *desc, 1112 u8 act, u8 noa_id) 1113 { 1114 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1115 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 1116 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 1117 struct sk_buff *skb; 1118 u8 *cmd; 1119 int ret; 1120 1121 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 1122 if (!skb) { 1123 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1124 return -ENOMEM; 1125 } 1126 skb_put(skb, H2C_P2P_ACT_LEN); 1127 cmd = skb->data; 1128 1129 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 1130 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 1131 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 1132 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 1133 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 1134 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 1135 if (desc) { 1136 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 1137 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 1138 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 1139 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 1140 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 1141 } 1142 1143 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1144 H2C_CAT_MAC, 
H2C_CL_MAC_PS, 1145 H2C_FUNC_P2P_ACT, 0, 0, 1146 H2C_P2P_ACT_LEN); 1147 1148 ret = rtw89_h2c_tx(rtwdev, skb, false); 1149 if (ret) { 1150 rtw89_err(rtwdev, "failed to send h2c\n"); 1151 goto fail; 1152 } 1153 1154 return 0; 1155 fail: 1156 dev_kfree_skb_any(skb); 1157 1158 return ret; 1159 } 1160 1161 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 1162 struct sk_buff *skb) 1163 { 1164 const struct rtw89_chip_info *chip = rtwdev->chip; 1165 struct rtw89_hal *hal = &rtwdev->hal; 1166 u8 ntx_path; 1167 u8 map_b; 1168 1169 if (chip->rf_path_num == 1) { 1170 ntx_path = RF_A; 1171 map_b = 0; 1172 } else { 1173 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 1174 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 1175 } 1176 1177 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 1178 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 1179 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 1180 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 1181 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 1182 } 1183 1184 #define H2C_CMC_TBL_LEN 68 1185 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 1186 struct rtw89_vif *rtwvif) 1187 { 1188 const struct rtw89_chip_info *chip = rtwdev->chip; 1189 struct sk_buff *skb; 1190 u8 macid = rtwvif->mac_id; 1191 int ret; 1192 1193 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1194 if (!skb) { 1195 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1196 return -ENOMEM; 1197 } 1198 skb_put(skb, H2C_CMC_TBL_LEN); 1199 SET_CTRL_INFO_MACID(skb->data, macid); 1200 SET_CTRL_INFO_OPERATION(skb->data, 1); 1201 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1202 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 1203 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1204 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 1205 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 1206 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 1207 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 1208 } 1209 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 1210 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 1211 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1212 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1213 1214 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1215 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1216 chip->h2c_cctl_func_id, 0, 1, 1217 H2C_CMC_TBL_LEN); 1218 1219 ret = rtw89_h2c_tx(rtwdev, skb, false); 1220 if (ret) { 1221 rtw89_err(rtwdev, "failed to send h2c\n"); 1222 goto fail; 1223 } 1224 1225 return 0; 1226 fail: 1227 dev_kfree_skb_any(skb); 1228 1229 return ret; 1230 } 1231 1232 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 1233 struct ieee80211_sta *sta, u8 *pads) 1234 { 1235 bool ppe_th; 1236 u8 ppe16, ppe8; 1237 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 1238 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 1239 u8 ru_bitmap; 1240 u8 n, idx, sh; 1241 u16 ppe; 1242 int i; 1243 1244 if (!sta->deflink.he_cap.has_he) 1245 return; 1246 1247 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 1248 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 1249 if (!ppe_th) { 1250 u8 pad; 1251 1252 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 1253 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 1254 1255 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 1256 pads[i] = pad; 1257 1258 return; 1259 } 1260 1261 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 1262 n = hweight8(ru_bitmap); 1263 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 1264 1265 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 1266 if (!(ru_bitmap & BIT(i))) { 1267 pads[i] = 1; 1268 continue; 1269 } 1270 1271 idx 
= n >> 3; 1272 sh = n & 7; 1273 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 1274 1275 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 1276 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1277 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1278 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1279 1280 if (ppe16 != 7 && ppe8 == 7) 1281 pads[i] = 2; 1282 else if (ppe8 != 7) 1283 pads[i] = 1; 1284 else 1285 pads[i] = 0; 1286 } 1287 } 1288 1289 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 1290 struct ieee80211_vif *vif, 1291 struct ieee80211_sta *sta) 1292 { 1293 const struct rtw89_chip_info *chip = rtwdev->chip; 1294 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1295 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1296 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1297 struct sk_buff *skb; 1298 u8 pads[RTW89_PPE_BW_NUM]; 1299 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1300 u16 lowest_rate; 1301 int ret; 1302 1303 memset(pads, 0, sizeof(pads)); 1304 if (sta) 1305 __get_sta_he_pkt_padding(rtwdev, sta, pads); 1306 1307 if (vif->p2p) 1308 lowest_rate = RTW89_HW_RATE_OFDM6; 1309 else if (chan->band_type == RTW89_BAND_2G) 1310 lowest_rate = RTW89_HW_RATE_CCK1; 1311 else 1312 lowest_rate = RTW89_HW_RATE_OFDM6; 1313 1314 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1315 if (!skb) { 1316 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1317 return -ENOMEM; 1318 } 1319 skb_put(skb, H2C_CMC_TBL_LEN); 1320 SET_CTRL_INFO_MACID(skb->data, mac_id); 1321 SET_CTRL_INFO_OPERATION(skb->data, 1); 1322 SET_CMC_TBL_DISRTSFB(skb->data, 1); 1323 SET_CMC_TBL_DISDATAFB(skb->data, 1); 1324 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 1325 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 1326 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 1327 if (vif->type == NL80211_IFTYPE_STATION) 1328 SET_CMC_TBL_ULDL(skb->data, 1); 1329 else 1330 SET_CMC_TBL_ULDL(skb->data, 0); 1331 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 1332 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 1333 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1334 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1335 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1336 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1337 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1338 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1339 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1340 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1341 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1342 } 1343 if (sta) 1344 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 1345 sta->deflink.he_cap.has_he); 1346 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1347 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1348 1349 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1350 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1351 chip->h2c_cctl_func_id, 0, 1, 1352 H2C_CMC_TBL_LEN); 1353 1354 ret = rtw89_h2c_tx(rtwdev, skb, false); 1355 if (ret) { 1356 rtw89_err(rtwdev, "failed to send h2c\n"); 1357 goto fail; 1358 } 1359 1360 return 0; 1361 fail: 1362 dev_kfree_skb_any(skb); 1363 1364 return ret; 1365 } 1366 1367 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1368 struct rtw89_sta *rtwsta) 1369 { 1370 const 
struct rtw89_chip_info *chip = rtwdev->chip; 1371 struct sk_buff *skb; 1372 int ret; 1373 1374 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1375 if (!skb) { 1376 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1377 return -ENOMEM; 1378 } 1379 skb_put(skb, H2C_CMC_TBL_LEN); 1380 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1381 SET_CTRL_INFO_OPERATION(skb->data, 1); 1382 if (rtwsta->cctl_tx_time) { 1383 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1384 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1385 } 1386 if (rtwsta->cctl_tx_retry_limit) { 1387 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1388 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1389 } 1390 1391 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1392 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1393 chip->h2c_cctl_func_id, 0, 1, 1394 H2C_CMC_TBL_LEN); 1395 1396 ret = rtw89_h2c_tx(rtwdev, skb, false); 1397 if (ret) { 1398 rtw89_err(rtwdev, "failed to send h2c\n"); 1399 goto fail; 1400 } 1401 1402 return 0; 1403 fail: 1404 dev_kfree_skb_any(skb); 1405 1406 return ret; 1407 } 1408 1409 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1410 struct rtw89_sta *rtwsta) 1411 { 1412 const struct rtw89_chip_info *chip = rtwdev->chip; 1413 struct sk_buff *skb; 1414 int ret; 1415 1416 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 1417 return 0; 1418 1419 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1420 if (!skb) { 1421 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1422 return -ENOMEM; 1423 } 1424 skb_put(skb, H2C_CMC_TBL_LEN); 1425 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1426 SET_CTRL_INFO_OPERATION(skb->data, 1); 1427 1428 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1429 1430 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1431 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1432 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 1433 H2C_CMC_TBL_LEN); 1434 1435 ret = rtw89_h2c_tx(rtwdev, skb, false); 1436 if (ret) { 1437 rtw89_err(rtwdev, "failed to send h2c\n"); 1438 goto fail; 1439 } 1440 1441 return 0; 1442 fail: 1443 dev_kfree_skb_any(skb); 1444 1445 return ret; 1446 } 1447 1448 #define H2C_BCN_BASE_LEN 12 1449 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 1450 struct rtw89_vif *rtwvif) 1451 { 1452 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 1453 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1454 struct sk_buff *skb; 1455 struct sk_buff *skb_beacon; 1456 u16 tim_offset; 1457 int bcn_total_len; 1458 u16 beacon_rate; 1459 int ret; 1460 1461 if (vif->p2p) 1462 beacon_rate = RTW89_HW_RATE_OFDM6; 1463 else if (chan->band_type == RTW89_BAND_2G) 1464 beacon_rate = RTW89_HW_RATE_CCK1; 1465 else 1466 beacon_rate = RTW89_HW_RATE_OFDM6; 1467 1468 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 1469 NULL, 0); 1470 if (!skb_beacon) { 1471 rtw89_err(rtwdev, "failed to get beacon skb\n"); 1472 return -ENOMEM; 1473 } 1474 1475 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 1476 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 1477 if (!skb) { 1478 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1479 dev_kfree_skb_any(skb_beacon); 1480 return -ENOMEM; 1481 } 1482 skb_put(skb, H2C_BCN_BASE_LEN); 1483 1484 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 1485 SET_BCN_UPD_MBSSID(skb->data, 0); 1486 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 1487 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 1488 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 1489 SET_BCN_UPD_SSN_SEL(skb->data, 
RTW89_MGMT_HW_SSN_SEL); 1490 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 1491 SET_BCN_UPD_RATE(skb->data, beacon_rate); 1492 1493 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 1494 dev_kfree_skb_any(skb_beacon); 1495 1496 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1497 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1498 H2C_FUNC_MAC_BCN_UPD, 0, 1, 1499 bcn_total_len); 1500 1501 ret = rtw89_h2c_tx(rtwdev, skb, false); 1502 if (ret) { 1503 rtw89_err(rtwdev, "failed to send h2c\n"); 1504 dev_kfree_skb_any(skb); 1505 return ret; 1506 } 1507 1508 return 0; 1509 } 1510 1511 #define H2C_ROLE_MAINTAIN_LEN 4 1512 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 1513 struct rtw89_vif *rtwvif, 1514 struct rtw89_sta *rtwsta, 1515 enum rtw89_upd_mode upd_mode) 1516 { 1517 struct sk_buff *skb; 1518 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1519 u8 self_role; 1520 int ret; 1521 1522 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 1523 if (rtwsta) 1524 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1525 else 1526 self_role = rtwvif->self_role; 1527 } else { 1528 self_role = rtwvif->self_role; 1529 } 1530 1531 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 1532 if (!skb) { 1533 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1534 return -ENOMEM; 1535 } 1536 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 1537 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 1538 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 1539 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 1540 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1541 1542 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1543 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1544 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 1545 H2C_ROLE_MAINTAIN_LEN); 1546 1547 ret = rtw89_h2c_tx(rtwdev, skb, false); 1548 if (ret) { 1549 rtw89_err(rtwdev, "failed to send h2c\n"); 1550 goto fail; 1551 } 1552 1553 return 0; 1554 fail: 1555 dev_kfree_skb_any(skb); 1556 1557 return ret; 1558 } 1559 1560 #define H2C_JOIN_INFO_LEN 4 1561 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1562 struct rtw89_sta *rtwsta, bool dis_conn) 1563 { 1564 struct sk_buff *skb; 1565 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1566 u8 self_role = rtwvif->self_role; 1567 u8 net_type = rtwvif->net_type; 1568 int ret; 1569 1570 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 1571 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1572 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 1573 } 1574 1575 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1576 if (!skb) { 1577 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1578 return -ENOMEM; 1579 } 1580 skb_put(skb, H2C_JOIN_INFO_LEN); 1581 SET_JOININFO_MACID(skb->data, mac_id); 1582 SET_JOININFO_OP(skb->data, dis_conn); 1583 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); 1584 SET_JOININFO_WMM(skb->data, rtwvif->wmm); 1585 SET_JOININFO_TGR(skb->data, rtwvif->trigger); 1586 SET_JOININFO_ISHESTA(skb->data, 0); 1587 SET_JOININFO_DLBW(skb->data, 0); 1588 SET_JOININFO_TF_MAC_PAD(skb->data, 0); 1589 SET_JOININFO_DL_T_PE(skb->data, 0); 1590 SET_JOININFO_PORT_ID(skb->data, rtwvif->port); 1591 SET_JOININFO_NET_TYPE(skb->data, net_type); 1592 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1593 SET_JOININFO_SELF_ROLE(skb->data, self_role); 1594 1595 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1596 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1597 H2C_FUNC_MAC_JOININFO, 0, 1, 1598 H2C_JOIN_INFO_LEN); 1599 1600 ret = rtw89_h2c_tx(rtwdev, skb, false); 1601 if (ret) { 1602 rtw89_err(rtwdev, "failed to send h2c\n"); 1603 goto fail; 1604 } 1605 1606 return 0; 1607 fail: 1608 dev_kfree_skb_any(skb); 1609 1610 return ret; 1611 } 1612 1613 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 1614 bool pause) 1615 { 1616 struct rtw89_fw_macid_pause_grp h2c = {{0}}; 1617 u8 len = sizeof(struct rtw89_fw_macid_pause_grp); 1618 struct sk_buff *skb; 1619 int ret; 1620 1621 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1622 if (!skb) { 1623 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1624 return -ENOMEM; 1625 } 1626 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); 1627 if (pause) 1628 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); 1629 skb_put_data(skb, &h2c, len); 1630 1631 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1632 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1633 H2C_FUNC_MAC_MACID_PAUSE, 1, 0, 1634 len); 1635 1636 ret = rtw89_h2c_tx(rtwdev, skb, false); 1637 if (ret) { 1638 rtw89_err(rtwdev, "failed to send h2c\n"); 1639 goto fail; 1640 } 1641 1642 return 0; 1643 fail: 1644 dev_kfree_skb_any(skb); 1645 1646 return ret; 1647 } 1648 1649 #define H2C_EDCA_LEN 12 1650 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1651 u8 ac, u32 val) 1652 { 1653 struct sk_buff *skb; 1654 int ret; 1655 1656 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 1657 if (!skb) { 1658 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 1659 return -ENOMEM; 1660 } 1661 skb_put(skb, H2C_EDCA_LEN); 1662 RTW89_SET_EDCA_SEL(skb->data, 0); 1663 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 1664 RTW89_SET_EDCA_WMM(skb->data, 0); 1665 RTW89_SET_EDCA_AC(skb->data, ac); 1666 RTW89_SET_EDCA_PARAM(skb->data, val); 1667 1668 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1669 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1670 H2C_FUNC_USR_EDCA, 0, 1, 1671 H2C_EDCA_LEN); 1672 1673 ret = rtw89_h2c_tx(rtwdev, skb, false); 1674 if (ret) { 1675 rtw89_err(rtwdev, "failed to send h2c\n"); 1676 goto fail; 1677 } 1678 1679 return 0; 1680 fail: 1681 dev_kfree_skb_any(skb); 1682 1683 return ret; 1684 } 1685 1686 #define H2C_TSF32_TOGL_LEN 4 1687 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1688 bool en) 1689 { 1690 struct sk_buff *skb; 1691 u16 early_us = en ? 
2000 : 0; 1692 u8 *cmd; 1693 int ret; 1694 1695 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 1696 if (!skb) { 1697 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1698 return -ENOMEM; 1699 } 1700 skb_put(skb, H2C_TSF32_TOGL_LEN); 1701 cmd = skb->data; 1702 1703 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 1704 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 1705 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 1706 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 1707 1708 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1709 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1710 H2C_FUNC_TSF32_TOGL, 0, 0, 1711 H2C_TSF32_TOGL_LEN); 1712 1713 ret = rtw89_h2c_tx(rtwdev, skb, false); 1714 if (ret) { 1715 rtw89_err(rtwdev, "failed to send h2c\n"); 1716 goto fail; 1717 } 1718 1719 return 0; 1720 fail: 1721 dev_kfree_skb_any(skb); 1722 1723 return ret; 1724 } 1725 1726 #define H2C_OFLD_CFG_LEN 8 1727 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 1728 { 1729 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 1730 struct sk_buff *skb; 1731 int ret; 1732 1733 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 1734 if (!skb) { 1735 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 1736 return -ENOMEM; 1737 } 1738 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 1739 1740 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1741 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1742 H2C_FUNC_OFLD_CFG, 0, 1, 1743 H2C_OFLD_CFG_LEN); 1744 1745 ret = rtw89_h2c_tx(rtwdev, skb, false); 1746 if (ret) { 1747 rtw89_err(rtwdev, "failed to send h2c\n"); 1748 goto fail; 1749 } 1750 1751 return 0; 1752 fail: 1753 dev_kfree_skb_any(skb); 1754 1755 return ret; 1756 } 1757 1758 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 1759 struct ieee80211_vif *vif, 1760 bool connect) 1761 { 1762 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 1763 struct ieee80211_bss_conf *bss_conf = vif ? 
&vif->bss_conf : NULL; 1764 struct rtw89_h2c_bcnfltr *h2c; 1765 u32 len = sizeof(*h2c); 1766 struct sk_buff *skb; 1767 int ret; 1768 1769 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 1770 return -EINVAL; 1771 1772 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 1773 return -EINVAL; 1774 1775 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1776 if (!skb) { 1777 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 1778 return -ENOMEM; 1779 } 1780 1781 skb_put(skb, len); 1782 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 1783 1784 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 1785 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 1786 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 1787 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 1788 RTW89_H2C_BCNFLTR_W0_MODE) | 1789 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 1790 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 1791 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI, 1792 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 1793 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 1794 1795 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1796 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1797 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 1798 1799 ret = rtw89_h2c_tx(rtwdev, skb, false); 1800 if (ret) { 1801 rtw89_err(rtwdev, "failed to send h2c\n"); 1802 goto fail; 1803 } 1804 1805 return 0; 1806 fail: 1807 dev_kfree_skb_any(skb); 1808 1809 return ret; 1810 } 1811 1812 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 1813 struct rtw89_rx_phy_ppdu *phy_ppdu) 1814 { 1815 struct rtw89_h2c_ofld_rssi *h2c; 1816 u32 len = sizeof(*h2c); 1817 struct sk_buff *skb; 1818 s8 rssi; 1819 int ret; 1820 1821 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 1822 return -EINVAL; 1823 1824 if (!phy_ppdu) 1825 return -EINVAL; 1826 1827 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1828 if (!skb) { 1829 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 1830 return -ENOMEM; 1831 } 1832 1833 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 1834 skb_put(skb, len); 1835 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 1836 1837 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 1838 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 1839 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 1840 1841 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1842 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1843 H2C_FUNC_OFLD_RSSI, 0, 1, len); 1844 1845 ret = rtw89_h2c_tx(rtwdev, skb, false); 1846 if (ret) { 1847 rtw89_err(rtwdev, "failed to send h2c\n"); 1848 goto fail; 1849 } 1850 1851 return 0; 1852 fail: 1853 dev_kfree_skb_any(skb); 1854 1855 return ret; 1856 } 1857 1858 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 1859 { 1860 struct rtw89_traffic_stats *stats = &rtwvif->stats; 1861 struct rtw89_h2c_ofld *h2c; 1862 u32 len = sizeof(*h2c); 1863 struct sk_buff *skb; 1864 int ret; 1865 1866 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 1867 return -EINVAL; 1868 1869 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1870 if (!skb) { 1871 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 1872 return -ENOMEM; 1873 } 1874 1875 skb_put(skb, len); 1876 h2c = (struct rtw89_h2c_ofld *)skb->data; 1877 1878 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 1879 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 1880 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 1881 1882 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1883 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1884 H2C_FUNC_OFLD_TP, 0, 1, len); 1885 1886 ret = rtw89_h2c_tx(rtwdev, skb, false); 1887 if (ret) { 1888 rtw89_err(rtwdev, "failed to send h2c\n"); 1889 goto fail; 1890 } 1891 1892 return 0; 1893 fail: 1894 dev_kfree_skb_any(skb); 1895 1896 return ret; 1897 } 1898 1899 #define H2C_RA_LEN 16 1900 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 1901 { 1902 struct sk_buff *skb; 1903 u8 *cmd; 1904 int ret; 1905 1906 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN); 1907 if (!skb) { 1908 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1909 return -ENOMEM; 1910 } 1911 skb_put(skb, H2C_RA_LEN); 1912 cmd = skb->data; 1913 rtw89_debug(rtwdev, RTW89_DBG_RA, 1914 "ra cmd msk: %llx ", ra->ra_mask); 1915 1916 RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); 1917 RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); 1918 RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); 1919 RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); 1920 RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); 1921 RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); 1922 RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); 1923 RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); 1924 RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap); 1925 RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); 1926 RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); 1927 RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); 1928 RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); 1929 RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); 1930 RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); 1931 RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); 1932 RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); 1933 RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); 1934 RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); 1935 RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en); 1936 RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf); 1937 1938 if (csi) { 1939 RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); 1940 RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); 1941 RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); 1942 RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); 1943 RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); 1944 RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); 1945 RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); 1946 RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); 1947 RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); 1948 } 1949 1950 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1951 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 1952 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 1953 H2C_RA_LEN); 1954 1955 ret = rtw89_h2c_tx(rtwdev, skb, false); 1956 if (ret) { 1957 rtw89_err(rtwdev, "failed to send h2c\n"); 1958 goto fail; 1959 } 1960 1961 return 0; 1962 fail: 1963 dev_kfree_skb_any(skb); 1964 1965 return ret; 1966 } 1967 1968 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 1969 { 1970 struct rtw89_btc *btc = &rtwdev->btc; 1971 struct rtw89_btc_dm *dm = &btc->dm; 1972 struct rtw89_btc_init_info *init_info = &dm->init_info; 1973 struct rtw89_btc_module *module = &init_info->module; 1974 struct rtw89_btc_ant_info *ant = &module->ant; 1975 struct rtw89_h2c_cxinit *h2c; 1976 u32 len = sizeof(*h2c); 1977 struct sk_buff *skb; 1978 int ret; 1979 1980 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1981 
if (!skb) { 1982 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 1983 return -ENOMEM; 1984 } 1985 skb_put(skb, len); 1986 h2c = (struct rtw89_h2c_cxinit *)skb->data; 1987 1988 h2c->hdr.type = CXDRVINFO_INIT; 1989 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 1990 1991 h2c->ant_type = ant->type; 1992 h2c->ant_num = ant->num; 1993 h2c->ant_iso = ant->isolation; 1994 h2c->ant_info = 1995 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 1996 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 1997 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 1998 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 1999 2000 h2c->mod_rfe = module->rfe_type; 2001 h2c->mod_cv = module->cv; 2002 h2c->mod_info = 2003 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 2004 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 2005 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 2006 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 2007 h2c->mod_adie_kt = module->kt_ver_adie; 2008 h2c->wl_gch = init_info->wl_guard_ch; 2009 2010 h2c->info = 2011 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 2012 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 2013 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 2014 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 2015 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 2016 2017 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2018 H2C_CAT_OUTSRC, BTFC_SET, 2019 SET_DRV_INFO, 0, 0, 2020 len); 2021 2022 ret = rtw89_h2c_tx(rtwdev, skb, false); 2023 if (ret) { 2024 rtw89_err(rtwdev, "failed to send h2c\n"); 2025 goto fail; 2026 } 2027 2028 return 0; 2029 fail: 2030 dev_kfree_skb_any(skb); 2031 2032 return ret; 2033 } 2034 2035 #define PORT_DATA_OFFSET 4 2036 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 2037 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 2038 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 2039 2040 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 2041 { 2042 struct rtw89_btc *btc = &rtwdev->btc; 2043 const struct rtw89_btc_ver *ver = btc->ver; 2044 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2045 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 2046 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2047 struct rtw89_btc_wl_active_role *active = role_info->active_role; 2048 struct sk_buff *skb; 2049 u32 len; 2050 u8 offset = 0; 2051 u8 *cmd; 2052 int ret; 2053 int i; 2054 2055 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 2056 2057 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2058 if (!skb) { 2059 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2060 return -ENOMEM; 2061 } 2062 skb_put(skb, len); 2063 cmd = skb->data; 2064 2065 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2066 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2067 2068 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2069 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2070 2071 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2072 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2073 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2074 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2075 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2076 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2077 
RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2078 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2079 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2080 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2081 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2082 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2083 2084 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2085 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2086 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2087 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2088 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2089 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2090 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2091 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2092 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2093 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2094 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2095 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2096 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2097 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2098 } 2099 2100 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2101 H2C_CAT_OUTSRC, BTFC_SET, 2102 SET_DRV_INFO, 0, 0, 2103 len); 2104 2105 ret = rtw89_h2c_tx(rtwdev, skb, false); 2106 if (ret) { 2107 rtw89_err(rtwdev, "failed to send h2c\n"); 2108 goto fail; 2109 } 2110 2111 return 0; 2112 fail: 2113 dev_kfree_skb_any(skb); 2114 2115 return ret; 2116 } 2117 2118 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 2119 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2120 2121 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 2122 { 2123 struct rtw89_btc *btc = &rtwdev->btc; 2124 const struct rtw89_btc_ver *ver = btc->ver; 2125 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2126 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 2127 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2128 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 2129 struct sk_buff *skb; 2130 u32 len; 2131 u8 *cmd, offset; 2132 int ret; 2133 int i; 2134 2135 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 2136 2137 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2138 if (!skb) { 2139 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2140 return -ENOMEM; 2141 } 2142 skb_put(skb, len); 2143 cmd = skb->data; 2144 2145 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2146 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2147 2148 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2149 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2150 2151 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2152 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2153 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2154 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2155 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2156 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2157 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2158 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2159 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2160 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2161 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, 
bpos->p2p_go); 2162 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2163 2164 offset = PORT_DATA_OFFSET; 2165 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2166 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2167 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2168 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2169 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2170 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2171 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2172 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2173 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2174 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2175 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2176 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2177 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2178 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2179 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2180 } 2181 2182 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2183 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2184 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2185 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2186 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2187 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2188 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2189 2190 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2191 H2C_CAT_OUTSRC, BTFC_SET, 2192 SET_DRV_INFO, 0, 0, 2193 len); 2194 2195 ret = rtw89_h2c_tx(rtwdev, skb, false); 2196 if (ret) { 2197 rtw89_err(rtwdev, "failed to send h2c\n"); 2198 goto fail; 2199 } 2200 2201 return 0; 2202 fail: 2203 dev_kfree_skb_any(skb); 2204 2205 return ret; 2206 } 2207 2208 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 2209 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2210 2211 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev) 2212 { 2213 struct rtw89_btc *btc = &rtwdev->btc; 2214 const struct rtw89_btc_ver *ver = btc->ver; 2215 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2216 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 2217 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2218 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 2219 struct sk_buff *skb; 2220 u32 len; 2221 u8 *cmd, offset; 2222 int ret; 2223 int i; 2224 2225 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 2226 2227 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2228 if (!skb) { 2229 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2230 return -ENOMEM; 2231 } 2232 skb_put(skb, len); 2233 cmd = skb->data; 2234 2235 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2236 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2237 2238 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2239 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2240 2241 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2242 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2243 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2244 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2245 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2246 
RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2247 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2248 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2249 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2250 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2251 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2252 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2253 2254 offset = PORT_DATA_OFFSET; 2255 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2256 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 2257 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 2258 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 2259 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 2260 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 2261 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 2262 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 2263 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 2264 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 2265 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 2266 } 2267 2268 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2269 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2270 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2271 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2272 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2273 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2274 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2275 2276 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2277 H2C_CAT_OUTSRC, BTFC_SET, 2278 SET_DRV_INFO, 0, 0, 2279 len); 2280 2281 ret = rtw89_h2c_tx(rtwdev, skb, false); 2282 if (ret) { 2283 rtw89_err(rtwdev, "failed to send h2c\n"); 2284 goto fail; 2285 } 2286 2287 return 0; 2288 fail: 2289 dev_kfree_skb_any(skb); 2290 2291 return ret; 2292 } 2293 2294 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2295 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2296 { 2297 struct rtw89_btc *btc = &rtwdev->btc; 2298 const struct rtw89_btc_ver *ver = btc->ver; 2299 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2300 struct sk_buff *skb; 2301 u8 *cmd; 2302 int ret; 2303 2304 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 2305 if (!skb) { 2306 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2307 return -ENOMEM; 2308 } 2309 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 2310 cmd = skb->data; 2311 2312 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 2313 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 2314 2315 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 2316 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 2317 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 2318 if (ver->fcxctrl == 0) 2319 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 2320 2321 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2322 H2C_CAT_OUTSRC, BTFC_SET, 2323 SET_DRV_INFO, 0, 0, 2324 H2C_LEN_CXDRVINFO_CTRL); 2325 2326 ret = rtw89_h2c_tx(rtwdev, skb, false); 2327 if (ret) { 2328 rtw89_err(rtwdev, "failed to send h2c\n"); 2329 goto fail; 2330 } 2331 2332 return 0; 2333 fail: 2334 dev_kfree_skb_any(skb); 2335 2336 return ret; 2337 } 2338 2339 #define H2C_LEN_CXDRVINFO_TRX (28 + 
H2C_LEN_CXDRVHDR) 2340 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev) 2341 { 2342 struct rtw89_btc *btc = &rtwdev->btc; 2343 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 2344 struct sk_buff *skb; 2345 u8 *cmd; 2346 int ret; 2347 2348 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 2349 if (!skb) { 2350 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 2351 return -ENOMEM; 2352 } 2353 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 2354 cmd = skb->data; 2355 2356 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX); 2357 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 2358 2359 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 2360 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 2361 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 2362 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 2363 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 2364 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 2365 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 2366 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 2367 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 2368 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 2369 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 2370 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 2371 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 2372 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 2373 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 2374 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 2375 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 2376 2377 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2378 H2C_CAT_OUTSRC, BTFC_SET, 2379 SET_DRV_INFO, 0, 0, 2380 H2C_LEN_CXDRVINFO_TRX); 2381 2382 ret = rtw89_h2c_tx(rtwdev, skb, false); 2383 if (ret) { 2384 rtw89_err(rtwdev, "failed to send h2c\n"); 2385 goto fail; 2386 } 2387 2388 return 0; 2389 fail: 2390 dev_kfree_skb_any(skb); 2391 2392 return ret; 2393 } 2394 2395 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 2396 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) 2397 { 2398 struct rtw89_btc *btc = &rtwdev->btc; 2399 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2400 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 2401 struct sk_buff *skb; 2402 u8 *cmd; 2403 int ret; 2404 2405 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 2406 if (!skb) { 2407 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2408 return -ENOMEM; 2409 } 2410 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 2411 cmd = skb->data; 2412 2413 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); 2414 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 2415 2416 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 2417 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 2418 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 2419 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 2420 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 2421 2422 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2423 H2C_CAT_OUTSRC, BTFC_SET, 2424 SET_DRV_INFO, 0, 0, 2425 H2C_LEN_CXDRVINFO_RFK); 2426 2427 ret = rtw89_h2c_tx(rtwdev, skb, false); 2428 if (ret) { 2429 rtw89_err(rtwdev, "failed to send h2c\n"); 2430 goto fail; 2431 } 2432 2433 return 0; 2434 fail: 2435 dev_kfree_skb_any(skb); 2436 2437 return ret; 2438 } 2439 2440 #define H2C_LEN_PKT_OFLD 4 2441 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 2442 { 2443 struct sk_buff *skb; 2444 u8 *cmd; 2445 int ret; 2446 2447 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 2448 
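/*
 * The command built below only removes the firmware-side copy of a
 * packet template previously registered with
 * rtw89_fw_h2c_add_pkt_offload(); releasing the id from
 * rtwdev->pkt_offload is left to the caller (see
 * rtw89_release_pkt_list()).
 */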
if (!skb) { 2449 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2450 return -ENOMEM; 2451 } 2452 skb_put(skb, H2C_LEN_PKT_OFLD); 2453 cmd = skb->data; 2454 2455 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 2456 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 2457 2458 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2459 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2460 H2C_FUNC_PACKET_OFLD, 1, 1, 2461 H2C_LEN_PKT_OFLD); 2462 2463 ret = rtw89_h2c_tx(rtwdev, skb, false); 2464 if (ret) { 2465 rtw89_err(rtwdev, "failed to send h2c\n"); 2466 goto fail; 2467 } 2468 2469 return 0; 2470 fail: 2471 dev_kfree_skb_any(skb); 2472 2473 return ret; 2474 } 2475 2476 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 2477 struct sk_buff *skb_ofld) 2478 { 2479 struct sk_buff *skb; 2480 u8 *cmd; 2481 u8 alloc_id; 2482 int ret; 2483 2484 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2485 RTW89_MAX_PKT_OFLD_NUM); 2486 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2487 return -ENOSPC; 2488 2489 *id = alloc_id; 2490 2491 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2492 if (!skb) { 2493 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2494 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2495 return -ENOMEM; 2496 } 2497 skb_put(skb, H2C_LEN_PKT_OFLD); 2498 cmd = skb->data; 2499 2500 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2501 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2502 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 2503 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 2504 2505 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2506 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2507 H2C_FUNC_PACKET_OFLD, 1, 1, 2508 H2C_LEN_PKT_OFLD + skb_ofld->len); 2509 2510 ret = rtw89_h2c_tx(rtwdev, skb, false); 2511 if (ret) { 2512 rtw89_err(rtwdev, "failed to send h2c\n"); 2513 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2514 goto fail; 2515 } 2516 2517 return 0; 2518 fail: 2519 dev_kfree_skb_any(skb); 2520 2521 return ret; 2522 } 2523 2524 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 2525 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 2526 struct list_head *chan_list) 2527 { 2528 struct rtw89_mac_chinfo *ch_info; 2529 struct sk_buff *skb; 2530 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 2531 u8 *cmd; 2532 int ret; 2533 2534 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 2535 if (!skb) { 2536 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 2537 return -ENOMEM; 2538 } 2539 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 2540 cmd = skb->data; 2541 2542 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 2543 /* in unit of 4 bytes */ 2544 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 2545 2546 list_for_each_entry(ch_info, chan_list, list) { 2547 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 2548 2549 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 2550 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 2551 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 2552 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 2553 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 2554 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 2555 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 2556 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 2557 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 2558 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 2559 
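/*
 * PKT_ID and PKT0..PKT7 below carry ids of probe request templates
 * that were uploaded through the packet offload command, letting the
 * firmware transmit them on this channel (pkt_id[] is filled in
 * rtw89_hw_scan_add_chan()).
 */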
RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 2560 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 2561 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 2562 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 2563 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 2564 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 2565 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 2566 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 2567 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 2568 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 2569 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 2570 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 2571 } 2572 2573 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2574 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2575 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 2576 2577 ret = rtw89_h2c_tx(rtwdev, skb, false); 2578 if (ret) { 2579 rtw89_err(rtwdev, "failed to send h2c\n"); 2580 goto fail; 2581 } 2582 2583 return 0; 2584 fail: 2585 dev_kfree_skb_any(skb); 2586 2587 return ret; 2588 } 2589 2590 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 2591 struct rtw89_scan_option *option, 2592 struct rtw89_vif *rtwvif) 2593 { 2594 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 2595 struct rtw89_h2c_scanofld *h2c; 2596 u32 len = sizeof(*h2c); 2597 struct sk_buff *skb; 2598 int ret; 2599 2600 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2601 if (!skb) { 2602 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 2603 return -ENOMEM; 2604 } 2605 skb_put(skb, len); 2606 h2c = (struct rtw89_h2c_scanofld *)skb->data; 2607 2608 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 2609 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 2610 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 2611 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 2612 2613 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 2614 le32_encode_bits(option->target_ch_mode, 2615 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 2616 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 2617 RTW89_H2C_SCANOFLD_W1_START_MODE) | 2618 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 2619 2620 if (option->target_ch_mode) { 2621 h2c->w1 |= le32_encode_bits(op->band_width, 2622 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 2623 le32_encode_bits(op->primary_channel, 2624 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 2625 le32_encode_bits(op->channel, 2626 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 2627 h2c->w0 |= le32_encode_bits(op->band_type, 2628 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 2629 } 2630 2631 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2632 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2633 H2C_FUNC_SCANOFLD, 1, 1, 2634 len); 2635 2636 ret = rtw89_h2c_tx(rtwdev, skb, false); 2637 if (ret) { 2638 rtw89_err(rtwdev, "failed to send h2c\n"); 2639 goto fail; 2640 } 2641 2642 return 0; 2643 fail: 2644 dev_kfree_skb_any(skb); 2645 2646 return ret; 2647 } 2648 2649 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 2650 struct rtw89_fw_h2c_rf_reg_info *info, 2651 u16 len, u8 page) 2652 { 2653 struct sk_buff *skb; 2654 u8 class = info->rf_path == RF_PATH_A ? 
2655 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 2656 int ret; 2657 2658 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2659 if (!skb) { 2660 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 2661 return -ENOMEM; 2662 } 2663 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 2664 2665 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2666 H2C_CAT_OUTSRC, class, page, 0, 0, 2667 len); 2668 2669 ret = rtw89_h2c_tx(rtwdev, skb, false); 2670 if (ret) { 2671 rtw89_err(rtwdev, "failed to send h2c\n"); 2672 goto fail; 2673 } 2674 2675 return 0; 2676 fail: 2677 dev_kfree_skb_any(skb); 2678 2679 return ret; 2680 } 2681 2682 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 2683 { 2684 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2685 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 2686 struct rtw89_fw_h2c_rf_get_mccch *mccch; 2687 struct sk_buff *skb; 2688 int ret; 2689 2690 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 2691 if (!skb) { 2692 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2693 return -ENOMEM; 2694 } 2695 skb_put(skb, sizeof(*mccch)); 2696 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 2697 2698 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 2699 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 2700 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 2701 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 2702 mccch->current_channel = cpu_to_le32(chan->channel); 2703 mccch->current_band_type = cpu_to_le32(chan->band_type); 2704 2705 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2706 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 2707 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 2708 sizeof(*mccch)); 2709 2710 ret = rtw89_h2c_tx(rtwdev, skb, false); 2711 if (ret) { 2712 rtw89_err(rtwdev, "failed to send h2c\n"); 2713 goto fail; 2714 } 2715 2716 return 0; 2717 fail: 2718 dev_kfree_skb_any(skb); 2719 2720 return ret; 2721 } 2722 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 2723 2724 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 2725 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 2726 bool rack, bool dack) 2727 { 2728 struct sk_buff *skb; 2729 int ret; 2730 2731 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2732 if (!skb) { 2733 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 2734 return -ENOMEM; 2735 } 2736 skb_put_data(skb, buf, len); 2737 2738 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2739 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 2740 len); 2741 2742 ret = rtw89_h2c_tx(rtwdev, skb, false); 2743 if (ret) { 2744 rtw89_err(rtwdev, "failed to send h2c\n"); 2745 goto fail; 2746 } 2747 2748 return 0; 2749 fail: 2750 dev_kfree_skb_any(skb); 2751 2752 return ret; 2753 } 2754 2755 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 2756 { 2757 struct sk_buff *skb; 2758 int ret; 2759 2760 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 2761 if (!skb) { 2762 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 2763 return -ENOMEM; 2764 } 2765 skb_put_data(skb, buf, len); 2766 2767 ret = rtw89_h2c_tx(rtwdev, skb, false); 2768 if (ret) { 2769 rtw89_err(rtwdev, "failed to send h2c\n"); 2770 goto fail; 2771 } 2772 2773 return 0; 2774 fail: 2775 dev_kfree_skb_any(skb); 2776 2777 return ret; 2778 } 2779 2780 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 2781 { 2782 struct rtw89_early_h2c *early_h2c; 2783 2784 lockdep_assert_held(&rtwdev->mutex); 2785 2786 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 2787 
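/*
 * Each entry queued on early_h2c_list is replayed as a raw H2C;
 * freeing the list is done separately by rtw89_fw_free_all_early_h2c().
 */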
rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 2788 } 2789 } 2790 2791 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 2792 { 2793 struct rtw89_early_h2c *early_h2c, *tmp; 2794 2795 mutex_lock(&rtwdev->mutex); 2796 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 2797 list_del(&early_h2c->list); 2798 kfree(early_h2c->h2c); 2799 kfree(early_h2c); 2800 } 2801 mutex_unlock(&rtwdev->mutex); 2802 } 2803 2804 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 2805 { 2806 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2807 2808 attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); 2809 attr->class = RTW89_GET_C2H_CLASS(c2h->data); 2810 attr->func = RTW89_GET_C2H_FUNC(c2h->data); 2811 attr->len = RTW89_GET_C2H_LEN(c2h->data); 2812 } 2813 2814 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 2815 struct sk_buff *c2h) 2816 { 2817 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2818 u8 category = attr->category; 2819 u8 class = attr->class; 2820 u8 func = attr->func; 2821 2822 switch (category) { 2823 default: 2824 return false; 2825 case RTW89_C2H_CAT_MAC: 2826 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 2827 } 2828 } 2829 2830 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 2831 { 2832 rtw89_fw_c2h_parse_attr(c2h); 2833 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 2834 goto enqueue; 2835 2836 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 2837 dev_kfree_skb_any(c2h); 2838 return; 2839 2840 enqueue: 2841 skb_queue_tail(&rtwdev->c2h_queue, c2h); 2842 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 2843 } 2844 2845 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 2846 struct sk_buff *skb) 2847 { 2848 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 2849 u8 category = attr->category; 2850 u8 class = attr->class; 2851 u8 func = attr->func; 2852 u16 len = attr->len; 2853 bool dump = true; 2854 2855 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2856 return; 2857 2858 switch (category) { 2859 case RTW89_C2H_CAT_TEST: 2860 break; 2861 case RTW89_C2H_CAT_MAC: 2862 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 2863 if (class == RTW89_MAC_C2H_CLASS_INFO && 2864 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 2865 dump = false; 2866 break; 2867 case RTW89_C2H_CAT_OUTSRC: 2868 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 2869 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 2870 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 2871 else 2872 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 2873 break; 2874 } 2875 2876 if (dump) 2877 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 2878 } 2879 2880 void rtw89_fw_c2h_work(struct work_struct *work) 2881 { 2882 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2883 c2h_work); 2884 struct sk_buff *skb, *tmp; 2885 2886 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 2887 skb_unlink(skb, &rtwdev->c2h_queue); 2888 mutex_lock(&rtwdev->mutex); 2889 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 2890 mutex_unlock(&rtwdev->mutex); 2891 dev_kfree_skb_any(skb); 2892 } 2893 } 2894 2895 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 2896 struct rtw89_mac_h2c_info *info) 2897 { 2898 const struct rtw89_chip_info *chip = rtwdev->chip; 2899 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2900 const u32 *h2c_reg = chip->h2c_regs; 2901 u8 i, val, len; 2902 int ret; 2903 2904 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 2905 rtwdev, chip->h2c_ctrl_reg); 2906 if (ret) { 2907 rtw89_warn(rtwdev, 
"FW does not process h2c registers\n"); 2908 return ret; 2909 } 2910 2911 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 2912 sizeof(info->h2creg[0])); 2913 2914 RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id); 2915 RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len); 2916 for (i = 0; i < RTW89_H2CREG_MAX; i++) 2917 rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]); 2918 2919 fw_info->h2c_counter++; 2920 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 2921 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 2922 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 2923 2924 return 0; 2925 } 2926 2927 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 2928 struct rtw89_mac_c2h_info *info) 2929 { 2930 const struct rtw89_chip_info *chip = rtwdev->chip; 2931 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2932 const u32 *c2h_reg = chip->c2h_regs; 2933 u32 ret; 2934 u8 i, val; 2935 2936 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 2937 2938 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 2939 RTW89_C2H_TIMEOUT, false, rtwdev, 2940 chip->c2h_ctrl_reg); 2941 if (ret) { 2942 rtw89_warn(rtwdev, "c2h reg timeout\n"); 2943 return ret; 2944 } 2945 2946 for (i = 0; i < RTW89_C2HREG_MAX; i++) 2947 info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 2948 2949 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 2950 2951 info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg); 2952 info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) - 2953 RTW89_C2HREG_HDR_LEN; 2954 2955 fw_info->c2h_counter++; 2956 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 2957 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 2958 2959 return 0; 2960 } 2961 2962 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 2963 struct rtw89_mac_h2c_info *h2c_info, 2964 struct rtw89_mac_c2h_info *c2h_info) 2965 { 2966 u32 ret; 2967 2968 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 2969 lockdep_assert_held(&rtwdev->mutex); 2970 2971 if (!h2c_info && !c2h_info) 2972 return -EINVAL; 2973 2974 if (!h2c_info) 2975 goto recv_c2h; 2976 2977 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 2978 if (ret) 2979 return ret; 2980 2981 recv_c2h: 2982 if (!c2h_info) 2983 return 0; 2984 2985 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 2986 if (ret) 2987 return ret; 2988 2989 return 0; 2990 } 2991 2992 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 2993 { 2994 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 2995 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 2996 return; 2997 } 2998 2999 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 3000 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 3001 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 3002 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 3003 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 3004 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 3005 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 3006 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 3007 3008 rtw89_fw_prog_cnt_dump(rtwdev); 3009 } 3010 3011 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 3012 { 3013 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3014 struct rtw89_pktofld_info *info, *tmp; 3015 u8 idx; 3016 3017 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 3018 if (!(rtwdev->chip->support_bands & BIT(idx))) 3019 continue; 3020 3021 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 3022 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 
3023 rtw89_core_release_bit_map(rtwdev->pkt_offload, 3024 info->id); 3025 list_del(&info->list); 3026 kfree(info); 3027 } 3028 } 3029 } 3030 3031 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 3032 struct rtw89_vif *rtwvif, 3033 struct rtw89_pktofld_info *info, 3034 enum nl80211_band band, u8 ssid_idx) 3035 { 3036 struct cfg80211_scan_request *req = rtwvif->scan_req; 3037 3038 if (band != NL80211_BAND_6GHZ) 3039 return false; 3040 3041 if (req->ssids[ssid_idx].ssid_len) { 3042 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 3043 req->ssids[ssid_idx].ssid_len); 3044 info->ssid_len = req->ssids[ssid_idx].ssid_len; 3045 return false; 3046 } else { 3047 return true; 3048 } 3049 } 3050 3051 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 3052 struct rtw89_vif *rtwvif, 3053 struct sk_buff *skb, u8 ssid_idx) 3054 { 3055 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3056 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3057 struct rtw89_pktofld_info *info; 3058 struct sk_buff *new; 3059 int ret = 0; 3060 u8 band; 3061 3062 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 3063 if (!(rtwdev->chip->support_bands & BIT(band))) 3064 continue; 3065 3066 new = skb_copy(skb, GFP_KERNEL); 3067 if (!new) { 3068 ret = -ENOMEM; 3069 goto out; 3070 } 3071 skb_put_data(new, ies->ies[band], ies->len[band]); 3072 skb_put_data(new, ies->common_ies, ies->common_ie_len); 3073 3074 info = kzalloc(sizeof(*info), GFP_KERNEL); 3075 if (!info) { 3076 ret = -ENOMEM; 3077 kfree_skb(new); 3078 goto out; 3079 } 3080 3081 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 3082 ssid_idx)) { 3083 kfree_skb(new); 3084 kfree(info); 3085 goto out; 3086 } 3087 3088 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 3089 if (ret) { 3090 kfree_skb(new); 3091 kfree(info); 3092 goto out; 3093 } 3094 3095 list_add_tail(&info->list, &scan_info->pkt_list[band]); 3096 kfree_skb(new); 3097 } 3098 out: 3099 return ret; 3100 } 3101 3102 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 3103 struct rtw89_vif *rtwvif) 3104 { 3105 struct cfg80211_scan_request *req = rtwvif->scan_req; 3106 struct sk_buff *skb; 3107 u8 num = req->n_ssids, i; 3108 int ret; 3109 3110 for (i = 0; i < num; i++) { 3111 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3112 req->ssids[i].ssid, 3113 req->ssids[i].ssid_len, 3114 req->ie_len); 3115 if (!skb) 3116 return -ENOMEM; 3117 3118 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 3119 kfree_skb(skb); 3120 3121 if (ret) 3122 return ret; 3123 } 3124 3125 return 0; 3126 } 3127 3128 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 3129 struct cfg80211_scan_request *req, 3130 struct rtw89_mac_chinfo *ch_info) 3131 { 3132 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3133 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3134 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3135 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3136 struct cfg80211_scan_6ghz_params *params; 3137 struct rtw89_pktofld_info *info, *tmp; 3138 struct ieee80211_hdr *hdr; 3139 struct sk_buff *skb; 3140 bool found; 3141 int ret = 0; 3142 u8 i; 3143 3144 if (!req->n_6ghz_params) 3145 return 0; 3146 3147 for (i = 0; i < req->n_6ghz_params; i++) { 3148 params = &req->scan_6ghz_params[i]; 3149 3150 if (req->channels[params->channel_idx]->hw_value != 3151 ch_info->pri_ch) 3152 continue; 3153 3154 found = false; 3155 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 3156 if 
(ether_addr_equal(tmp->bssid, params->bssid)) { 3157 found = true; 3158 break; 3159 } 3160 } 3161 if (found) 3162 continue; 3163 3164 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3165 NULL, 0, req->ie_len); 3166 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 3167 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 3168 hdr = (struct ieee80211_hdr *)skb->data; 3169 ether_addr_copy(hdr->addr3, params->bssid); 3170 3171 info = kzalloc(sizeof(*info), GFP_KERNEL); 3172 if (!info) { 3173 ret = -ENOMEM; 3174 kfree_skb(skb); 3175 goto out; 3176 } 3177 3178 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 3179 if (ret) { 3180 kfree_skb(skb); 3181 kfree(info); 3182 goto out; 3183 } 3184 3185 ether_addr_copy(info->bssid, params->bssid); 3186 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 3187 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 3188 3189 ch_info->tx_pkt = true; 3190 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 3191 3192 kfree_skb(skb); 3193 } 3194 3195 out: 3196 return ret; 3197 } 3198 3199 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 3200 int ssid_num, 3201 struct rtw89_mac_chinfo *ch_info) 3202 { 3203 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3204 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3205 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3206 struct cfg80211_scan_request *req = rtwvif->scan_req; 3207 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3208 struct rtw89_pktofld_info *info; 3209 u8 band, probe_count = 0; 3210 int ret; 3211 3212 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 3213 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 3214 ch_info->bw = RTW89_SCAN_WIDTH; 3215 ch_info->tx_pkt = true; 3216 ch_info->cfg_tx_pwr = false; 3217 ch_info->tx_pwr_idx = 0; 3218 ch_info->tx_null = false; 3219 ch_info->pause_data = false; 3220 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 3221 3222 if (ch_info->ch_band == RTW89_BAND_6G) { 3223 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 3224 !ch_info->is_psc) { 3225 ch_info->tx_pkt = false; 3226 if (!req->duration_mandatory) 3227 ch_info->period -= RTW89_DWELL_TIME_6G; 3228 } 3229 } 3230 3231 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 3232 if (ret) 3233 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 3234 3235 if (ssid_num) { 3236 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 3237 3238 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 3239 if (info->channel_6ghz && 3240 ch_info->pri_ch != info->channel_6ghz) 3241 continue; 3242 ch_info->pkt_id[probe_count++] = info->id; 3243 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 3244 break; 3245 } 3246 ch_info->num_pkt = probe_count; 3247 } 3248 3249 switch (chan_type) { 3250 case RTW89_CHAN_OPERATE: 3251 ch_info->central_ch = op->channel; 3252 ch_info->pri_ch = op->primary_channel; 3253 ch_info->ch_band = op->band_type; 3254 ch_info->bw = op->band_width; 3255 ch_info->tx_null = true; 3256 ch_info->num_pkt = 0; 3257 break; 3258 case RTW89_CHAN_DFS: 3259 if (ch_info->ch_band != RTW89_BAND_6G) 3260 ch_info->period = max_t(u8, ch_info->period, 3261 RTW89_DFS_CHAN_TIME); 3262 ch_info->dwell_time = RTW89_DWELL_TIME; 3263 break; 3264 case RTW89_CHAN_ACTIVE: 3265 break; 3266 default: 3267 rtw89_err(rtwdev, "Channel type out of bound\n"); 3268 } 3269 } 3270 3271 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 3272 struct rtw89_vif *rtwvif, bool 
connected) 3273 { 3274 struct cfg80211_scan_request *req = rtwvif->scan_req; 3275 struct rtw89_mac_chinfo *ch_info, *tmp; 3276 struct ieee80211_channel *channel; 3277 struct list_head chan_list; 3278 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 3279 int list_len, off_chan_time = 0; 3280 enum rtw89_chan_type type; 3281 int ret = 0; 3282 u32 idx; 3283 3284 INIT_LIST_HEAD(&chan_list); 3285 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 3286 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 3287 idx++, list_len++) { 3288 channel = req->channels[idx]; 3289 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 3290 if (!ch_info) { 3291 ret = -ENOMEM; 3292 goto out; 3293 } 3294 3295 if (req->duration_mandatory) 3296 ch_info->period = req->duration; 3297 else if (channel->band == NL80211_BAND_6GHZ) 3298 ch_info->period = RTW89_CHANNEL_TIME_6G + 3299 RTW89_DWELL_TIME_6G; 3300 else 3301 ch_info->period = RTW89_CHANNEL_TIME; 3302 3303 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 3304 ch_info->central_ch = channel->hw_value; 3305 ch_info->pri_ch = channel->hw_value; 3306 ch_info->rand_seq_num = random_seq; 3307 ch_info->is_psc = cfg80211_channel_is_psc(channel); 3308 3309 if (channel->flags & 3310 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 3311 type = RTW89_CHAN_DFS; 3312 else 3313 type = RTW89_CHAN_ACTIVE; 3314 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 3315 3316 if (connected && 3317 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 3318 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3319 if (!tmp) { 3320 ret = -ENOMEM; 3321 kfree(ch_info); 3322 goto out; 3323 } 3324 3325 type = RTW89_CHAN_OPERATE; 3326 tmp->period = req->duration_mandatory ? 3327 req->duration : RTW89_CHANNEL_TIME; 3328 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 3329 list_add_tail(&tmp->list, &chan_list); 3330 off_chan_time = 0; 3331 list_len++; 3332 } 3333 list_add_tail(&ch_info->list, &chan_list); 3334 off_chan_time += ch_info->period; 3335 } 3336 rtwdev->scan_info.last_chan_idx = idx; 3337 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 3338 3339 out: 3340 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 3341 list_del(&ch_info->list); 3342 kfree(ch_info); 3343 } 3344 3345 return ret; 3346 } 3347 3348 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 3349 struct rtw89_vif *rtwvif, bool connected) 3350 { 3351 int ret; 3352 3353 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 3354 if (ret) { 3355 rtw89_err(rtwdev, "Update probe request failed\n"); 3356 goto out; 3357 } 3358 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected); 3359 out: 3360 return ret; 3361 } 3362 3363 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3364 struct ieee80211_scan_request *scan_req) 3365 { 3366 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3367 struct cfg80211_scan_request *req = &scan_req->req; 3368 u32 rx_fltr = rtwdev->hal.rx_fltr; 3369 u8 mac_addr[ETH_ALEN]; 3370 3371 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 3372 rtwdev->scan_info.scanning_vif = vif; 3373 rtwdev->scan_info.last_chan_idx = 0; 3374 rtwvif->scan_ies = &scan_req->ies; 3375 rtwvif->scan_req = req; 3376 ieee80211_stop_queues(rtwdev->hw); 3377 3378 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 3379 get_random_mask_addr(mac_addr, req->mac_addr, 3380 req->mac_addr_mask); 3381 else 3382 ether_addr_copy(mac_addr, vif->addr); 3383 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 3384 3385 
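/*
 * Relax the RX filter while scanning (no beacon BSSID check, no
 * A1-match requirement) so frames from other BSSes can be received;
 * rtw89_hw_scan_complete() restores the value saved in
 * rtwdev->hal.rx_fltr.
 */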
rx_fltr &= ~B_AX_A_BCN_CHK_EN; 3386 rx_fltr &= ~B_AX_A_BC; 3387 rx_fltr &= ~B_AX_A_A1_MATCH; 3388 rtw89_write32_mask(rtwdev, 3389 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3390 B_AX_RX_FLTR_CFG_MASK, 3391 rx_fltr); 3392 } 3393 3394 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3395 bool aborted) 3396 { 3397 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3398 struct cfg80211_scan_info info = { 3399 .aborted = aborted, 3400 }; 3401 struct rtw89_vif *rtwvif; 3402 3403 if (!vif) 3404 return; 3405 3406 rtw89_write32_mask(rtwdev, 3407 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3408 B_AX_RX_FLTR_CFG_MASK, 3409 rtwdev->hal.rx_fltr); 3410 3411 rtw89_core_scan_complete(rtwdev, vif, true); 3412 ieee80211_scan_completed(rtwdev->hw, &info); 3413 ieee80211_wake_queues(rtwdev->hw); 3414 3415 rtw89_release_pkt_list(rtwdev); 3416 rtwvif = (struct rtw89_vif *)vif->drv_priv; 3417 rtwvif->scan_req = NULL; 3418 rtwvif->scan_ies = NULL; 3419 scan_info->last_chan_idx = 0; 3420 scan_info->scanning_vif = NULL; 3421 3422 rtw89_set_channel(rtwdev); 3423 } 3424 3425 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 3426 { 3427 rtw89_hw_scan_offload(rtwdev, vif, false); 3428 rtw89_hw_scan_complete(rtwdev, vif, true); 3429 } 3430 3431 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3432 bool enable) 3433 { 3434 struct rtw89_scan_option opt = {0}; 3435 struct rtw89_vif *rtwvif; 3436 bool connected; 3437 int ret = 0; 3438 3439 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 3440 if (!rtwvif) 3441 return -EINVAL; 3442 3443 /* This variable implies connected or during attempt to connect */ 3444 connected = !is_zero_ether_addr(rtwvif->bssid); 3445 opt.enable = enable; 3446 opt.target_ch_mode = connected; 3447 if (enable) { 3448 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 3449 if (ret) 3450 goto out; 3451 } 3452 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 3453 out: 3454 return ret; 3455 } 3456 3457 #define H2C_FW_CPU_EXCEPTION_LEN 4 3458 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 3459 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 3460 { 3461 struct sk_buff *skb; 3462 int ret; 3463 3464 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 3465 if (!skb) { 3466 rtw89_err(rtwdev, 3467 "failed to alloc skb for fw cpu exception\n"); 3468 return -ENOMEM; 3469 } 3470 3471 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 3472 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 3473 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 3474 3475 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3476 H2C_CAT_TEST, 3477 H2C_CL_FW_STATUS_TEST, 3478 H2C_FUNC_CPU_EXCEPTION, 0, 0, 3479 H2C_FW_CPU_EXCEPTION_LEN); 3480 3481 ret = rtw89_h2c_tx(rtwdev, skb, false); 3482 if (ret) { 3483 rtw89_err(rtwdev, "failed to send h2c\n"); 3484 goto fail; 3485 } 3486 3487 return 0; 3488 3489 fail: 3490 dev_kfree_skb_any(skb); 3491 return ret; 3492 } 3493 3494 #define H2C_PKT_DROP_LEN 24 3495 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 3496 const struct rtw89_pkt_drop_params *params) 3497 { 3498 struct sk_buff *skb; 3499 int ret; 3500 3501 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 3502 if (!skb) { 3503 rtw89_err(rtwdev, 3504 "failed to alloc skb for packet drop\n"); 3505 return -ENOMEM; 3506 } 3507 3508 switch (params->sel) { 3509 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 3510 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 3511 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 3512 
case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 3513 case RTW89_PKT_DROP_SEL_BAND_ONCE: 3514 break; 3515 default: 3516 rtw89_debug(rtwdev, RTW89_DBG_FW, 3517 "H2C of pkt drop might not fully support sel: %d yet\n", 3518 params->sel); 3519 break; 3520 } 3521 3522 skb_put(skb, H2C_PKT_DROP_LEN); 3523 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 3524 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 3525 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 3526 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 3527 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 3528 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 3529 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 3530 params->macid_band_sel[0]); 3531 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 3532 params->macid_band_sel[1]); 3533 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 3534 params->macid_band_sel[2]); 3535 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 3536 params->macid_band_sel[3]); 3537 3538 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3539 H2C_CAT_MAC, 3540 H2C_CL_MAC_FW_OFLD, 3541 H2C_FUNC_PKT_DROP, 0, 0, 3542 H2C_PKT_DROP_LEN); 3543 3544 ret = rtw89_h2c_tx(rtwdev, skb, false); 3545 if (ret) { 3546 rtw89_err(rtwdev, "failed to send h2c\n"); 3547 goto fail; 3548 } 3549 3550 return 0; 3551 3552 fail: 3553 dev_kfree_skb_any(skb); 3554 return ret; 3555 } 3556 3557 #define H2C_KEEP_ALIVE_LEN 4 3558 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3559 bool enable) 3560 { 3561 struct sk_buff *skb; 3562 u8 pkt_id = 0; 3563 int ret; 3564 3565 if (enable) { 3566 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 3567 RTW89_PKT_OFLD_TYPE_NULL_DATA, 3568 &pkt_id); 3569 if (ret) 3570 return -EPERM; 3571 } 3572 3573 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 3574 if (!skb) { 3575 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3576 return -ENOMEM; 3577 } 3578 3579 skb_put(skb, H2C_KEEP_ALIVE_LEN); 3580 3581 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 3582 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 3583 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 3584 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 3585 3586 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3587 H2C_CAT_MAC, 3588 H2C_CL_MAC_WOW, 3589 H2C_FUNC_KEEP_ALIVE, 0, 1, 3590 H2C_KEEP_ALIVE_LEN); 3591 3592 ret = rtw89_h2c_tx(rtwdev, skb, false); 3593 if (ret) { 3594 rtw89_err(rtwdev, "failed to send h2c\n"); 3595 goto fail; 3596 } 3597 3598 return 0; 3599 3600 fail: 3601 dev_kfree_skb_any(skb); 3602 3603 return ret; 3604 } 3605 3606 #define H2C_DISCONNECT_DETECT_LEN 8 3607 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 3608 struct rtw89_vif *rtwvif, bool enable) 3609 { 3610 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3611 struct sk_buff *skb; 3612 u8 macid = rtwvif->mac_id; 3613 int ret; 3614 3615 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 3616 if (!skb) { 3617 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3618 return -ENOMEM; 3619 } 3620 3621 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 3622 3623 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 3624 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 3625 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 3626 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 3627 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 3628 
RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 3629 } 3630 3631 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3632 H2C_CAT_MAC, 3633 H2C_CL_MAC_WOW, 3634 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 3635 H2C_DISCONNECT_DETECT_LEN); 3636 3637 ret = rtw89_h2c_tx(rtwdev, skb, false); 3638 if (ret) { 3639 rtw89_err(rtwdev, "failed to send h2c\n"); 3640 goto fail; 3641 } 3642 3643 return 0; 3644 3645 fail: 3646 dev_kfree_skb_any(skb); 3647 3648 return ret; 3649 } 3650 3651 #define H2C_WOW_GLOBAL_LEN 8 3652 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3653 bool enable) 3654 { 3655 struct sk_buff *skb; 3656 u8 macid = rtwvif->mac_id; 3657 int ret; 3658 3659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); 3660 if (!skb) { 3661 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3662 return -ENOMEM; 3663 } 3664 3665 skb_put(skb, H2C_WOW_GLOBAL_LEN); 3666 3667 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); 3668 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); 3669 3670 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3671 H2C_CAT_MAC, 3672 H2C_CL_MAC_WOW, 3673 H2C_FUNC_WOW_GLOBAL, 0, 1, 3674 H2C_WOW_GLOBAL_LEN); 3675 3676 ret = rtw89_h2c_tx(rtwdev, skb, false); 3677 if (ret) { 3678 rtw89_err(rtwdev, "failed to send h2c\n"); 3679 goto fail; 3680 } 3681 3682 return 0; 3683 3684 fail: 3685 dev_kfree_skb_any(skb); 3686 3687 return ret; 3688 } 3689 3690 #define H2C_WAKEUP_CTRL_LEN 4 3691 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 3692 struct rtw89_vif *rtwvif, 3693 bool enable) 3694 { 3695 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3696 struct sk_buff *skb; 3697 u8 macid = rtwvif->mac_id; 3698 int ret; 3699 3700 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 3701 if (!skb) { 3702 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3703 return -ENOMEM; 3704 } 3705 3706 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 3707 3708 if (rtw_wow->pattern_cnt) 3709 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 3710 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 3711 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 3712 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 3713 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 3714 3715 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 3716 3717 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3718 H2C_CAT_MAC, 3719 H2C_CL_MAC_WOW, 3720 H2C_FUNC_WAKEUP_CTRL, 0, 1, 3721 H2C_WAKEUP_CTRL_LEN); 3722 3723 ret = rtw89_h2c_tx(rtwdev, skb, false); 3724 if (ret) { 3725 rtw89_err(rtwdev, "failed to send h2c\n"); 3726 goto fail; 3727 } 3728 3729 return 0; 3730 3731 fail: 3732 dev_kfree_skb_any(skb); 3733 3734 return ret; 3735 } 3736 3737 #define H2C_WOW_CAM_UPD_LEN 24 3738 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 3739 struct rtw89_wow_cam_info *cam_info) 3740 { 3741 struct sk_buff *skb; 3742 int ret; 3743 3744 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 3745 if (!skb) { 3746 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3747 return -ENOMEM; 3748 } 3749 3750 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 3751 3752 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 3753 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 3754 if (cam_info->valid) { 3755 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 3756 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 3757 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 3758 
RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 3759 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 3760 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 3761 cam_info->negative_pattern_match); 3762 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 3763 cam_info->skip_mac_hdr); 3764 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 3765 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 3766 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 3767 } 3768 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 3769 3770 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3771 H2C_CAT_MAC, 3772 H2C_CL_MAC_WOW, 3773 H2C_FUNC_WOW_CAM_UPD, 0, 1, 3774 H2C_WOW_CAM_UPD_LEN); 3775 3776 ret = rtw89_h2c_tx(rtwdev, skb, false); 3777 if (ret) { 3778 rtw89_err(rtwdev, "failed to send h2c\n"); 3779 goto fail; 3780 } 3781 3782 return 0; 3783 fail: 3784 dev_kfree_skb_any(skb); 3785 3786 return ret; 3787 } 3788 3789 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 3790 struct rtw89_wait_info *wait, unsigned int cond) 3791 { 3792 int ret; 3793 3794 ret = rtw89_h2c_tx(rtwdev, skb, false); 3795 if (ret) { 3796 rtw89_err(rtwdev, "failed to send h2c\n"); 3797 dev_kfree_skb_any(skb); 3798 return -EBUSY; 3799 } 3800 3801 return rtw89_wait_for_cond(wait, cond); 3802 } 3803 3804 #define H2C_ADD_MCC_LEN 16 3805 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 3806 const struct rtw89_fw_mcc_add_req *p) 3807 { 3808 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3809 struct sk_buff *skb; 3810 unsigned int cond; 3811 3812 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 3813 if (!skb) { 3814 rtw89_err(rtwdev, 3815 "failed to alloc skb for add mcc\n"); 3816 return -ENOMEM; 3817 } 3818 3819 skb_put(skb, H2C_ADD_MCC_LEN); 3820 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 3821 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 3822 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 3823 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 3824 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 3825 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 3826 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 3827 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 3828 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 3829 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 3830 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 3831 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 3832 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 3833 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 3834 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 3835 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 3836 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 3837 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 3838 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 3839 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 3840 3841 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3842 H2C_CAT_MAC, 3843 H2C_CL_MCC, 3844 H2C_FUNC_ADD_MCC, 0, 0, 3845 H2C_ADD_MCC_LEN); 3846 3847 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 3848 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3849 } 3850 3851 #define H2C_START_MCC_LEN 12 3852 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 3853 const struct 
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for start mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_START_MCC_LEN);
	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_START_MCC, 0, 0,
			      H2C_START_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_STOP_MCC_LEN 4
int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
			  bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for stop mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_STOP_MCC_LEN);
	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_STOP_MCC, 0, 0,
			      H2C_STOP_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_DEL_MCC_GROUP_LEN 4
int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
			       bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for del mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
			      H2C_DEL_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_RESET_MCC_GROUP_LEN 4
int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for reset mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
			      H2C_RESET_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_REQ_TSF_LEN 4
int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mcc_tsf_req *req,
			     struct rtw89_mac_mcc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_mac_mcc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int cond;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
			      H2C_MCC_REQ_TSF_LEN);

	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret)
		return ret;

	/* the TSF report is returned through the wait data buffer */
	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}

#define H2C_MCC_MACID_BITMAP_DSC_LEN 4
int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
				  u8 *bitmap)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 map_len;
	u8 h2c_len;

	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
	map_len = RTW89_MAX_MAC_ID_NUM / 8;
	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc macid bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, h2c_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
			      h2c_len);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_SYNC_LEN 4
int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
			  u8 target, u8 offset)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SYNC_LEN);
	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SYNC, 0, 0,
			      H2C_MCC_SYNC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_SET_DURATION_LEN 20
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mcc_duration *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc set duration\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
						       p->start_tsf_low);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
							p->start_tsf_high);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
			      H2C_MCC_SET_DURATION_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}