1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "chan.h" 7 #include "coex.h" 8 #include "debug.h" 9 #include "fw.h" 10 #include "mac.h" 11 #include "phy.h" 12 #include "reg.h" 13 #include "util.h" 14 15 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 16 struct sk_buff *skb); 17 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 18 struct rtw89_wait_info *wait, unsigned int cond); 19 20 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 21 bool header) 22 { 23 struct sk_buff *skb; 24 u32 header_len = 0; 25 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 26 27 if (header) 28 header_len = H2C_HEADER_LEN; 29 30 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 31 if (!skb) 32 return NULL; 33 skb_reserve(skb, header_len + h2c_desc_size); 34 memset(skb->data, 0, len); 35 36 return skb; 37 } 38 39 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 40 { 41 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 42 } 43 44 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 45 { 46 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 47 } 48 49 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) 50 { 51 u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); 52 53 return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); 54 } 55 56 #define FWDL_WAIT_CNT 400000 57 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) 58 { 59 u8 val; 60 int ret; 61 62 ret = read_poll_timeout_atomic(_fw_get_rdy, val, 63 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 64 1, FWDL_WAIT_CNT, false, rtwdev); 65 if (ret) { 66 switch (val) { 67 case RTW89_FWDL_CHECKSUM_FAIL: 68 rtw89_err(rtwdev, "fw checksum fail\n"); 69 return -EINVAL; 70 71 case RTW89_FWDL_SECURITY_FAIL: 72 rtw89_err(rtwdev, "fw security fail\n"); 73 return -EINVAL; 74 75 case RTW89_FWDL_CV_NOT_MATCH: 76 rtw89_err(rtwdev, "fw cv not match\n"); 77 return -EINVAL; 78 79 default: 80 return -EBUSY; 81 } 82 } 83 84 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 85 86 return 0; 87 } 88 89 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 90 struct rtw89_fw_bin_info *info) 91 { 92 struct rtw89_fw_hdr_section_info *section_info; 93 const u8 *fw_end = fw + len; 94 const u8 *fwdynhdr; 95 const u8 *bin; 96 u32 base_hdr_len; 97 u32 mssc_len = 0; 98 u32 i; 99 100 if (!info) 101 return -EINVAL; 102 103 info->section_num = GET_FW_HDR_SEC_NUM(fw); 104 base_hdr_len = RTW89_FW_HDR_SIZE + 105 info->section_num * RTW89_FW_SECTION_HDR_SIZE; 106 info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw); 107 108 if (info->dynamic_hdr_en) { 109 info->hdr_len = GET_FW_HDR_LEN(fw); 110 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 111 fwdynhdr = fw + base_hdr_len; 112 if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) { 113 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 114 return -EINVAL; 115 } 116 } else { 117 info->hdr_len = base_hdr_len; 118 info->dynamic_hdr_len = 0; 119 } 120 121 bin = fw + info->hdr_len; 122 123 /* jump to section header */ 124 fw += RTW89_FW_HDR_SIZE; 125 section_info = info->section_info; 126 for (i = 0; i < info->section_num; i++) { 127 section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw); 128 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 129 section_info->mssc = GET_FWSECTION_HDR_MSSC(fw); 130 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN; 131 } else { 132 section_info->mssc = 0; 133 } 134 135 section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw); 136 if (GET_FWSECTION_HDR_CHECKSUM(fw)) 137 section_info->len += FWDL_SECTION_CHKSUM_LEN; 138 section_info->redl = GET_FWSECTION_HDR_REDL(fw); 139 section_info->dladdr = 140 GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff; 141 section_info->addr = bin; 142 bin += section_info->len; 143 fw += RTW89_FW_SECTION_HDR_SIZE; 144 section_info++; 145 } 146 147 if (fw_end != bin + mssc_len) { 148 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 149 return -EINVAL; 150 } 151 152 return 0; 153 } 154 155 static 156 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 157 struct rtw89_fw_suit *fw_suit, bool nowarn) 158 { 159 struct rtw89_fw_info *fw_info = &rtwdev->fw; 160 const struct firmware *firmware = fw_info->req.firmware; 161 const u8 *mfw = firmware->data; 162 u32 mfw_len = firmware->size; 163 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 164 const struct rtw89_mfw_info *mfw_info; 165 int i; 166 167 if (mfw_hdr->sig != RTW89_MFW_SIG) { 168 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 169 /* legacy firmware support normal type only */ 170 if (type != RTW89_FW_NORMAL) 171 return -EINVAL; 172 fw_suit->data = mfw; 173 fw_suit->size = mfw_len; 174 return 0; 175 } 176 177 for (i = 0; i < mfw_hdr->fw_nr; i++) { 178 mfw_info = &mfw_hdr->info[i]; 179 if (mfw_info->cv != rtwdev->hal.cv || 180 mfw_info->type != type || 181 mfw_info->mp) 182 continue; 183 184 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 185 fw_suit->size = le32_to_cpu(mfw_info->size); 186 return 0; 187 } 188 189 if (!nowarn) 190 rtw89_err(rtwdev, "no suitable firmware found\n"); 191 return -ENOENT; 192 } 193 194 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 195 enum rtw89_fw_type type, 196 struct rtw89_fw_suit *fw_suit) 197 { 198 const u8 *hdr = fw_suit->data; 199 200 fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr); 201 fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr); 202 fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr); 203 fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr); 204 fw_suit->build_year = GET_FW_HDR_YEAR(hdr); 205 fw_suit->build_mon = GET_FW_HDR_MONTH(hdr); 206 fw_suit->build_date = GET_FW_HDR_DATE(hdr); 207 fw_suit->build_hour = GET_FW_HDR_HOUR(hdr); 208 fw_suit->build_min = GET_FW_HDR_MIN(hdr); 209 fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr); 210 211 rtw89_info(rtwdev, 212 "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n", 213 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 214 fw_suit->sub_idex, fw_suit->cmd_ver, type); 215 } 216 217 static 218 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 219 bool nowarn) 220 { 221 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 222 int ret; 223 224 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 225 if (ret) 226 return ret; 227 228 rtw89_fw_update_ver(rtwdev, type, fw_suit); 229 230 return 0; 231 } 232 233 #define __DEF_FW_FEAT_COND(__cond, __op) \ 234 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 235 { \ 236 return suit_ver_code __op comp_ver_code; \ 237 } 238 239 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 240 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 241 __DEF_FW_FEAT_COND(lt, <); /* less than */ 242 243 struct __fw_feat_cfg { 244 enum rtw89_core_chip_id chip_id; 245 enum rtw89_fw_feature feature; 246 u32 ver_code; 247 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 248 }; 249 250 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 251 { \ 252 .chip_id = _chip, \ 253 .feature = RTW89_FW_FEATURE_ ## _feat, \ 254 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 255 .cond = __fw_feat_cond_ ## _cond, \ 256 } 257 258 static const struct __fw_feat_cfg fw_feat_tbl[] = { 259 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 260 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 261 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 262 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 263 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 264 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 265 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 266 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 267 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 268 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 269 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 270 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 271 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 272 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 273 }; 274 275 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 276 const struct rtw89_chip_info *chip, 277 u32 ver_code) 278 { 279 int i; 280 281 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 282 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 283 284 if (chip->chip_id != ent->chip_id) 285 continue; 286 287 if (ent->cond(ver_code, ent->ver_code)) 288 RTW89_SET_FW_FEATURE(ent->feature, fw); 289 } 290 } 291 292 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 293 { 294 const struct rtw89_chip_info *chip = rtwdev->chip; 295 const struct rtw89_fw_suit *fw_suit; 296 u32 suit_ver_code; 297 298 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 299 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 300 301 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 302 } 303 304 const struct firmware * 305 rtw89_early_fw_feature_recognize(struct device *device, 306 const struct rtw89_chip_info *chip, 307 struct rtw89_fw_info *early_fw, 308 int *used_fw_format) 309 { 310 union rtw89_compat_fw_hdr buf = {}; 311 const struct firmware *firmware; 312 bool full_req = false; 313 char fw_name[64]; 314 int fw_format; 315 u32 ver_code; 316 int ret; 317 318 /* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will 319 * be denied (-EPERM). Then, we don't get right firmware things as 320 * expected. So, in this case, we have to request full firmware here. 321 */ 322 if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE)) 323 full_req = true; 324 325 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 326 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 327 chip->fw_basename, fw_format); 328 329 if (full_req) 330 ret = request_firmware(&firmware, fw_name, device); 331 else 332 ret = request_partial_firmware_into_buf(&firmware, fw_name, 333 device, &buf, sizeof(buf), 334 0); 335 if (!ret) { 336 dev_info(device, "loaded firmware %s\n", fw_name); 337 *used_fw_format = fw_format; 338 break; 339 } 340 } 341 342 if (ret) { 343 dev_err(device, "failed to early request firmware: %d\n", ret); 344 return NULL; 345 } 346 347 if (full_req) 348 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 349 else 350 ver_code = rtw89_compat_fw_hdr_ver_code(&buf); 351 352 if (!ver_code) 353 goto out; 354 355 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 356 357 out: 358 if (full_req) 359 return firmware; 360 361 release_firmware(firmware); 362 return NULL; 363 } 364 365 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 366 { 367 const struct rtw89_chip_info *chip = rtwdev->chip; 368 int ret; 369 370 if (chip->try_ce_fw) { 371 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 372 if (!ret) 373 goto normal_done; 374 } 375 376 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 377 if (ret) 378 return ret; 379 380 normal_done: 381 /* It still works if wowlan firmware isn't existing. */ 382 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 383 384 rtw89_fw_recognize_features(rtwdev); 385 386 rtw89_coex_recognize_ver(rtwdev); 387 388 return 0; 389 } 390 391 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 392 u8 type, u8 cat, u8 class, u8 func, 393 bool rack, bool dack, u32 len) 394 { 395 struct fwcmd_hdr *hdr; 396 397 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 398 399 if (!(rtwdev->fw.h2c_seq % 4)) 400 rack = true; 401 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 402 FIELD_PREP(H2C_HDR_CAT, cat) | 403 FIELD_PREP(H2C_HDR_CLASS, class) | 404 FIELD_PREP(H2C_HDR_FUNC, func) | 405 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 406 407 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 408 len + H2C_HEADER_LEN) | 409 (rack ? H2C_HDR_REC_ACK : 0) | 410 (dack ? H2C_HDR_DONE_ACK : 0)); 411 412 rtwdev->fw.h2c_seq++; 413 } 414 415 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 416 struct sk_buff *skb, 417 u8 type, u8 cat, u8 class, u8 func, 418 u32 len) 419 { 420 struct fwcmd_hdr *hdr; 421 422 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 423 424 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 425 FIELD_PREP(H2C_HDR_CAT, cat) | 426 FIELD_PREP(H2C_HDR_CLASS, class) | 427 FIELD_PREP(H2C_HDR_FUNC, func) | 428 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 429 430 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 431 len + H2C_HEADER_LEN)); 432 } 433 434 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 435 { 436 struct sk_buff *skb; 437 u32 ret = 0; 438 439 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 440 if (!skb) { 441 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 442 return -ENOMEM; 443 } 444 445 skb_put_data(skb, fw, len); 446 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); 447 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 448 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 449 H2C_FUNC_MAC_FWHDR_DL, len); 450 451 ret = rtw89_h2c_tx(rtwdev, skb, false); 452 if (ret) { 453 rtw89_err(rtwdev, "failed to send h2c\n"); 454 ret = -1; 455 goto fail; 456 } 457 458 return 0; 459 fail: 460 dev_kfree_skb_any(skb); 461 462 return ret; 463 } 464 465 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 466 { 467 u8 val; 468 int ret; 469 470 ret = __rtw89_fw_download_hdr(rtwdev, fw, len); 471 if (ret) { 472 rtw89_err(rtwdev, "[ERR]FW header download\n"); 473 return ret; 474 } 475 476 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, 477 1, FWDL_WAIT_CNT, false, 478 rtwdev, R_AX_WCPU_FW_CTRL); 479 if (ret) { 480 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 481 return ret; 482 } 483 484 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 485 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 486 487 return 0; 488 } 489 490 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 491 struct rtw89_fw_hdr_section_info *info) 492 { 493 struct sk_buff *skb; 494 const u8 *section = info->addr; 495 u32 residue_len = info->len; 496 u32 pkt_len; 497 int ret; 498 499 while (residue_len) { 500 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 501 pkt_len = FWDL_SECTION_PER_PKT_LEN; 502 else 503 pkt_len = residue_len; 504 505 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 506 if (!skb) { 507 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 508 return -ENOMEM; 509 } 510 skb_put_data(skb, section, pkt_len); 511 512 ret = rtw89_h2c_tx(rtwdev, skb, true); 513 if (ret) { 514 rtw89_err(rtwdev, "failed to send h2c\n"); 515 ret = -1; 516 goto fail; 517 } 518 519 section += pkt_len; 520 residue_len -= pkt_len; 521 } 522 523 return 0; 524 fail: 525 dev_kfree_skb_any(skb); 526 527 return ret; 528 } 529 530 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, 531 struct rtw89_fw_bin_info *info) 532 { 533 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 534 u8 section_num = info->section_num; 535 int ret; 536 537 while (section_num--) { 538 ret = __rtw89_fw_download_main(rtwdev, section_info); 539 if (ret) 540 return ret; 541 section_info++; 542 } 543 544 mdelay(5); 545 546 ret = rtw89_fw_check_rdy(rtwdev); 547 if (ret) { 548 rtw89_warn(rtwdev, "download firmware fail\n"); 549 return ret; 550 } 551 552 return 0; 553 } 554 555 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 556 { 557 u32 val32; 558 u16 index; 559 560 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 561 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 562 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 563 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 564 565 for (index = 0; index < 15; index++) { 566 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL); 567 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 568 fsleep(10); 569 } 570 } 571 572 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 573 { 574 u32 val32; 575 u16 val16; 576 577 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 578 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 579 580 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); 581 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); 582 583 rtw89_fw_prog_cnt_dump(rtwdev); 584 } 585 586 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 587 { 588 struct rtw89_fw_info *fw_info = &rtwdev->fw; 589 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 590 struct rtw89_fw_bin_info info; 591 const u8 *fw = fw_suit->data; 592 u32 len = fw_suit->size; 593 u8 val; 594 int ret; 595 596 rtw89_mac_disable_cpu(rtwdev); 597 ret = rtw89_mac_enable_cpu(rtwdev, 0, true); 598 if (ret) 599 return ret; 600 601 if (!fw || !len) { 602 rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); 603 return -ENOENT; 604 } 605 606 ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); 607 if (ret) { 608 rtw89_err(rtwdev, "parse fw header fail\n"); 609 goto fwdl_err; 610 } 611 612 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, 613 1, FWDL_WAIT_CNT, false, 614 rtwdev, R_AX_WCPU_FW_CTRL); 615 if (ret) { 616 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 617 goto fwdl_err; 618 } 619 620 ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len); 621 if (ret) { 622 ret = -EBUSY; 623 goto fwdl_err; 624 } 625 626 ret = rtw89_fw_download_main(rtwdev, fw, &info); 627 if (ret) { 628 ret = -EBUSY; 629 goto fwdl_err; 630 } 631 632 fw_info->h2c_seq = 0; 633 fw_info->rec_seq = 0; 634 fw_info->h2c_counter = 0; 635 fw_info->c2h_counter = 0; 636 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 637 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 638 639 return ret; 640 641 fwdl_err: 642 rtw89_fw_dl_fail_dump(rtwdev); 643 return ret; 644 } 645 646 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 647 { 648 struct rtw89_fw_info *fw = &rtwdev->fw; 649 650 wait_for_completion(&fw->req.completion); 651 if (!fw->req.firmware) 652 return -EINVAL; 653 654 return 0; 655 } 656 657 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 658 struct rtw89_fw_req_info *req, 659 const char *fw_name, bool nowarn) 660 { 661 int ret; 662 663 if (req->firmware) { 664 rtw89_debug(rtwdev, RTW89_DBG_FW, 665 "full firmware has been early requested\n"); 666 complete_all(&req->completion); 667 return 0; 668 } 669 670 if (nowarn) 671 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 672 else 673 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 674 675 complete_all(&req->completion); 676 677 return ret; 678 } 679 680 void rtw89_load_firmware_work(struct work_struct *work) 681 { 682 struct rtw89_dev *rtwdev = 683 container_of(work, struct rtw89_dev, load_firmware_work); 684 const struct rtw89_chip_info *chip = rtwdev->chip; 685 char fw_name[64]; 686 687 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 688 chip->fw_basename, rtwdev->fw.fw_format); 689 690 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 691 } 692 693 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 694 { 695 struct rtw89_fw_info *fw = &rtwdev->fw; 696 697 cancel_work_sync(&rtwdev->load_firmware_work); 698 699 if (fw->req.firmware) { 700 release_firmware(fw->req.firmware); 701 702 /* assign NULL back in case rtw89_free_ieee80211_hw() 703 * try to release the same one again. 704 */ 705 fw->req.firmware = NULL; 706 } 707 } 708 709 #define H2C_CAM_LEN 60 710 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 711 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 712 { 713 struct sk_buff *skb; 714 int ret; 715 716 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 717 if (!skb) { 718 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 719 return -ENOMEM; 720 } 721 skb_put(skb, H2C_CAM_LEN); 722 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 723 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data); 724 725 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 726 H2C_CAT_MAC, 727 H2C_CL_MAC_ADDR_CAM_UPDATE, 728 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 729 H2C_CAM_LEN); 730 731 ret = rtw89_h2c_tx(rtwdev, skb, false); 732 if (ret) { 733 rtw89_err(rtwdev, "failed to send h2c\n"); 734 goto fail; 735 } 736 737 return 0; 738 fail: 739 dev_kfree_skb_any(skb); 740 741 return ret; 742 } 743 744 #define H2C_DCTL_SEC_CAM_LEN 68 745 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 746 struct rtw89_vif *rtwvif, 747 struct rtw89_sta *rtwsta) 748 { 749 struct sk_buff *skb; 750 int ret; 751 752 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); 753 if (!skb) { 754 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 755 return -ENOMEM; 756 } 757 skb_put(skb, H2C_DCTL_SEC_CAM_LEN); 758 759 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); 760 761 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 762 H2C_CAT_MAC, 763 H2C_CL_MAC_FR_EXCHG, 764 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 765 H2C_DCTL_SEC_CAM_LEN); 766 767 ret = rtw89_h2c_tx(rtwdev, skb, false); 768 if (ret) { 769 rtw89_err(rtwdev, "failed to send h2c\n"); 770 goto fail; 771 } 772 773 return 0; 774 fail: 775 dev_kfree_skb_any(skb); 776 777 return ret; 778 } 779 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 780 781 #define H2C_BA_CAM_LEN 8 782 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 783 bool valid, struct ieee80211_ampdu_params *params) 784 { 785 const struct rtw89_chip_info *chip = rtwdev->chip; 786 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 787 u8 macid = rtwsta->mac_id; 788 struct sk_buff *skb; 789 u8 entry_idx; 790 int ret; 791 792 ret = valid ? 793 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 794 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 795 if (ret) { 796 /* it still works even if we don't have static BA CAM, because 797 * hardware can create dynamic BA CAM automatically. 798 */ 799 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 800 "failed to %s entry tid=%d for h2c ba cam\n", 801 valid ? "alloc" : "free", params->tid); 802 return 0; 803 } 804 805 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 806 if (!skb) { 807 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 808 return -ENOMEM; 809 } 810 skb_put(skb, H2C_BA_CAM_LEN); 811 SET_BA_CAM_MACID(skb->data, macid); 812 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 813 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 814 else 815 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx); 816 if (!valid) 817 goto end; 818 SET_BA_CAM_VALID(skb->data, valid); 819 SET_BA_CAM_TID(skb->data, params->tid); 820 if (params->buf_size > 64) 821 SET_BA_CAM_BMAP_SIZE(skb->data, 4); 822 else 823 SET_BA_CAM_BMAP_SIZE(skb->data, 0); 824 /* If init req is set, hw will set the ssn */ 825 SET_BA_CAM_INIT_REQ(skb->data, 1); 826 SET_BA_CAM_SSN(skb->data, params->ssn); 827 828 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 829 SET_BA_CAM_STD_EN(skb->data, 1); 830 SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx); 831 } 832 833 end: 834 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 835 H2C_CAT_MAC, 836 H2C_CL_BA_CAM, 837 H2C_FUNC_MAC_BA_CAM, 0, 1, 838 H2C_BA_CAM_LEN); 839 840 ret = rtw89_h2c_tx(rtwdev, skb, false); 841 if (ret) { 842 rtw89_err(rtwdev, "failed to send h2c\n"); 843 goto fail; 844 } 845 846 return 0; 847 fail: 848 dev_kfree_skb_any(skb); 849 850 return ret; 851 } 852 853 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 854 u8 entry_idx, u8 uid) 855 { 856 struct sk_buff *skb; 857 int ret; 858 859 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 860 if (!skb) { 861 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 862 return -ENOMEM; 863 } 864 skb_put(skb, H2C_BA_CAM_LEN); 865 866 SET_BA_CAM_VALID(skb->data, 1); 867 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 868 SET_BA_CAM_UID(skb->data, uid); 869 SET_BA_CAM_BAND(skb->data, 0); 870 SET_BA_CAM_STD_EN(skb->data, 0); 871 872 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 873 H2C_CAT_MAC, 874 H2C_CL_BA_CAM, 875 H2C_FUNC_MAC_BA_CAM, 0, 1, 876 H2C_BA_CAM_LEN); 877 878 ret = rtw89_h2c_tx(rtwdev, skb, false); 879 if (ret) { 880 rtw89_err(rtwdev, "failed to send h2c\n"); 881 goto fail; 882 } 883 884 return 0; 885 fail: 886 dev_kfree_skb_any(skb); 887 888 return ret; 889 } 890 891 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 892 { 893 const struct rtw89_chip_info *chip = rtwdev->chip; 894 u8 entry_idx = chip->bacam_num; 895 u8 uid = 0; 896 int i; 897 898 for (i = 0; i < chip->bacam_dynamic_num; i++) { 899 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 900 entry_idx++; 901 uid++; 902 } 903 } 904 905 #define H2C_LOG_CFG_LEN 12 906 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 907 { 908 struct sk_buff *skb; 909 u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 910 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; 911 int ret; 912 913 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 914 if (!skb) { 915 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 916 return -ENOMEM; 917 } 918 919 skb_put(skb, H2C_LOG_CFG_LEN); 920 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); 921 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 922 SET_LOG_CFG_COMP(skb->data, comp); 923 SET_LOG_CFG_COMP_EXT(skb->data, 0); 924 925 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 926 H2C_CAT_MAC, 927 H2C_CL_FW_INFO, 928 H2C_FUNC_LOG_CFG, 0, 0, 929 H2C_LOG_CFG_LEN); 930 931 ret = rtw89_h2c_tx(rtwdev, skb, false); 932 if (ret) { 933 rtw89_err(rtwdev, "failed to send h2c\n"); 934 goto fail; 935 } 936 937 return 0; 938 fail: 939 dev_kfree_skb_any(skb); 940 941 return ret; 942 } 943 944 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 945 struct rtw89_vif *rtwvif, 946 enum rtw89_fw_pkt_ofld_type type, 947 u8 *id) 948 { 949 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 950 struct rtw89_pktofld_info *info; 951 struct sk_buff *skb; 952 int ret; 953 954 info = kzalloc(sizeof(*info), GFP_KERNEL); 955 if (!info) 956 return -ENOMEM; 957 958 switch (type) { 959 case RTW89_PKT_OFLD_TYPE_PS_POLL: 960 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 961 break; 962 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 963 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 964 break; 965 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 966 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 967 break; 968 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 969 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 970 break; 971 default: 972 goto err; 973 } 974 975 if (!skb) 976 goto err; 977 978 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 979 kfree_skb(skb); 980 981 if (ret) 982 goto err; 983 984 list_add_tail(&info->list, &rtwvif->general_pkt_list); 985 *id = info->id; 986 return 0; 987 988 err: 989 kfree(info); 990 return -ENOMEM; 991 } 992 993 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 994 struct rtw89_vif *rtwvif, bool notify_fw) 995 { 996 struct list_head *pkt_list = &rtwvif->general_pkt_list; 997 struct rtw89_pktofld_info *info, *tmp; 998 999 list_for_each_entry_safe(info, tmp, pkt_list, list) { 1000 if (notify_fw) 1001 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 1002 else 1003 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 1004 list_del(&info->list); 1005 kfree(info); 1006 } 1007 } 1008 1009 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 1010 { 1011 struct rtw89_vif *rtwvif; 1012 1013 rtw89_for_each_rtwvif(rtwdev, rtwvif) 1014 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw); 1015 } 1016 1017 #define H2C_GENERAL_PKT_LEN 6 1018 #define H2C_GENERAL_PKT_ID_UND 0xff 1019 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 1020 struct rtw89_vif *rtwvif, u8 macid) 1021 { 1022 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 1023 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 1024 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 1025 struct sk_buff *skb; 1026 int ret; 1027 1028 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1029 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 1030 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1031 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 1032 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1033 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 1034 1035 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 1036 if (!skb) { 1037 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1038 return -ENOMEM; 1039 } 1040 skb_put(skb, H2C_GENERAL_PKT_LEN); 1041 SET_GENERAL_PKT_MACID(skb->data, macid); 1042 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 1043 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 1044 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 1045 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 1046 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 1047 1048 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1049 H2C_CAT_MAC, 1050 H2C_CL_FW_INFO, 1051 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 1052 H2C_GENERAL_PKT_LEN); 1053 1054 ret = rtw89_h2c_tx(rtwdev, skb, false); 1055 if (ret) { 1056 rtw89_err(rtwdev, "failed to send h2c\n"); 1057 goto fail; 1058 } 1059 1060 return 0; 1061 fail: 1062 dev_kfree_skb_any(skb); 1063 1064 return ret; 1065 } 1066 1067 #define H2C_LPS_PARM_LEN 8 1068 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 1069 struct rtw89_lps_parm *lps_param) 1070 { 1071 struct sk_buff *skb; 1072 int ret; 1073 1074 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 1075 if (!skb) { 1076 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1077 return -ENOMEM; 1078 } 1079 skb_put(skb, H2C_LPS_PARM_LEN); 1080 1081 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 1082 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 1083 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 1084 SET_LPS_PARM_RLBM(skb->data, 1); 1085 SET_LPS_PARM_SMARTPS(skb->data, 1); 1086 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 1087 SET_LPS_PARM_VOUAPSD(skb->data, 0); 1088 SET_LPS_PARM_VIUAPSD(skb->data, 0); 1089 SET_LPS_PARM_BEUAPSD(skb->data, 0); 1090 SET_LPS_PARM_BKUAPSD(skb->data, 0); 1091 1092 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1093 H2C_CAT_MAC, 1094 H2C_CL_MAC_PS, 1095 H2C_FUNC_MAC_LPS_PARM, 0, 1, 1096 H2C_LPS_PARM_LEN); 1097 1098 ret = rtw89_h2c_tx(rtwdev, skb, false); 1099 if (ret) { 1100 rtw89_err(rtwdev, "failed to send h2c\n"); 1101 goto fail; 1102 } 1103 1104 return 0; 1105 fail: 1106 dev_kfree_skb_any(skb); 1107 1108 return ret; 1109 } 1110 1111 #define H2C_P2P_ACT_LEN 20 1112 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 1113 struct ieee80211_p2p_noa_desc *desc, 1114 u8 act, u8 noa_id) 1115 { 1116 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1117 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 1118 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 1119 struct sk_buff *skb; 1120 u8 *cmd; 1121 int ret; 1122 1123 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 1124 if (!skb) { 1125 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1126 return -ENOMEM; 1127 } 1128 skb_put(skb, H2C_P2P_ACT_LEN); 1129 cmd = skb->data; 1130 1131 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 1132 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 1133 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 1134 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 1135 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 1136 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 1137 if (desc) { 1138 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 1139 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 1140 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 1141 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 1142 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 1143 } 1144 1145 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1146 H2C_CAT_MAC, H2C_CL_MAC_PS, 1147 H2C_FUNC_P2P_ACT, 0, 0, 1148 H2C_P2P_ACT_LEN); 1149 1150 ret = rtw89_h2c_tx(rtwdev, skb, false); 1151 if (ret) { 1152 rtw89_err(rtwdev, "failed to send h2c\n"); 1153 goto fail; 1154 } 1155 1156 return 0; 1157 fail: 1158 dev_kfree_skb_any(skb); 1159 1160 return ret; 1161 } 1162 1163 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 1164 struct sk_buff *skb) 1165 { 1166 const struct rtw89_chip_info *chip = rtwdev->chip; 1167 struct rtw89_hal *hal = &rtwdev->hal; 1168 u8 ntx_path; 1169 u8 map_b; 1170 1171 if (chip->rf_path_num == 1) { 1172 ntx_path = RF_A; 1173 map_b = 0; 1174 } else { 1175 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 1176 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 1177 } 1178 1179 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 1180 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 1181 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 1182 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 1183 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 1184 } 1185 1186 #define H2C_CMC_TBL_LEN 68 1187 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 1188 struct rtw89_vif *rtwvif) 1189 { 1190 const struct rtw89_chip_info *chip = rtwdev->chip; 1191 struct sk_buff *skb; 1192 u8 macid = rtwvif->mac_id; 1193 int ret; 1194 1195 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1196 if (!skb) { 1197 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1198 return -ENOMEM; 1199 } 1200 skb_put(skb, H2C_CMC_TBL_LEN); 1201 SET_CTRL_INFO_MACID(skb->data, macid); 1202 SET_CTRL_INFO_OPERATION(skb->data, 1); 1203 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1204 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 1205 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1206 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 1207 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 1208 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 1209 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 1210 } 1211 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 1212 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 1213 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1214 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1215 1216 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1217 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1218 chip->h2c_cctl_func_id, 0, 1, 1219 H2C_CMC_TBL_LEN); 1220 1221 ret = rtw89_h2c_tx(rtwdev, skb, false); 1222 if (ret) { 1223 rtw89_err(rtwdev, "failed to send h2c\n"); 1224 goto fail; 1225 } 1226 1227 return 0; 1228 fail: 1229 dev_kfree_skb_any(skb); 1230 1231 return ret; 1232 } 1233 1234 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 1235 struct ieee80211_sta *sta, u8 *pads) 1236 { 1237 bool ppe_th; 1238 u8 ppe16, ppe8; 1239 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 1240 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 1241 u8 ru_bitmap; 1242 u8 n, idx, sh; 1243 u16 ppe; 1244 int i; 1245 1246 if (!sta->deflink.he_cap.has_he) 1247 return; 1248 1249 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 1250 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 1251 if (!ppe_th) { 1252 u8 pad; 1253 1254 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 1255 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 1256 1257 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 1258 pads[i] = pad; 1259 1260 return; 1261 } 1262 1263 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 1264 n = hweight8(ru_bitmap); 1265 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 1266 1267 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 1268 if (!(ru_bitmap & BIT(i))) { 1269 pads[i] = 1; 1270 continue; 1271 } 1272 1273 idx = n >> 3; 1274 sh = n & 7; 1275 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 1276 1277 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 1278 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1279 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1280 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1281 1282 if (ppe16 != 7 && ppe8 == 7) 1283 pads[i] = 2; 1284 else if (ppe8 != 7) 1285 pads[i] = 1; 1286 else 1287 pads[i] = 0; 1288 } 1289 } 1290 1291 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 1292 struct ieee80211_vif *vif, 1293 struct ieee80211_sta *sta) 1294 { 1295 const struct rtw89_chip_info *chip = rtwdev->chip; 1296 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1297 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1298 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1299 struct sk_buff *skb; 1300 u8 pads[RTW89_PPE_BW_NUM]; 1301 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1302 u16 lowest_rate; 1303 int ret; 1304 1305 memset(pads, 0, sizeof(pads)); 1306 if (sta) 1307 __get_sta_he_pkt_padding(rtwdev, sta, pads); 1308 1309 if (vif->p2p) 1310 lowest_rate = RTW89_HW_RATE_OFDM6; 1311 else if (chan->band_type == RTW89_BAND_2G) 1312 lowest_rate = RTW89_HW_RATE_CCK1; 1313 else 1314 lowest_rate = RTW89_HW_RATE_OFDM6; 1315 1316 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1317 if (!skb) { 1318 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1319 return -ENOMEM; 1320 } 1321 skb_put(skb, H2C_CMC_TBL_LEN); 1322 SET_CTRL_INFO_MACID(skb->data, mac_id); 1323 SET_CTRL_INFO_OPERATION(skb->data, 1); 1324 SET_CMC_TBL_DISRTSFB(skb->data, 1); 1325 SET_CMC_TBL_DISDATAFB(skb->data, 1); 1326 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 1327 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 1328 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 1329 if (vif->type == NL80211_IFTYPE_STATION) 1330 SET_CMC_TBL_ULDL(skb->data, 1); 1331 else 1332 SET_CMC_TBL_ULDL(skb->data, 0); 1333 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 1334 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 1335 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1336 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1337 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1338 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1339 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1340 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1341 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1342 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1343 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1344 } 1345 if (sta) 1346 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 1347 sta->deflink.he_cap.has_he); 1348 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1349 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1350 1351 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1352 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1353 chip->h2c_cctl_func_id, 0, 1, 1354 H2C_CMC_TBL_LEN); 1355 1356 ret = rtw89_h2c_tx(rtwdev, skb, false); 1357 if (ret) { 1358 rtw89_err(rtwdev, "failed to send h2c\n"); 1359 goto fail; 1360 } 1361 1362 return 0; 1363 fail: 1364 dev_kfree_skb_any(skb); 1365 1366 return ret; 1367 } 1368 1369 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1370 struct rtw89_sta *rtwsta) 1371 { 1372 const struct rtw89_chip_info *chip = rtwdev->chip; 1373 struct sk_buff *skb; 1374 int ret; 1375 1376 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1377 if (!skb) { 1378 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1379 return -ENOMEM; 1380 } 1381 skb_put(skb, H2C_CMC_TBL_LEN); 1382 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1383 SET_CTRL_INFO_OPERATION(skb->data, 1); 1384 if (rtwsta->cctl_tx_time) { 1385 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1386 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1387 } 1388 if (rtwsta->cctl_tx_retry_limit) { 1389 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1390 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1391 } 1392 1393 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1394 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1395 chip->h2c_cctl_func_id, 0, 1, 1396 H2C_CMC_TBL_LEN); 1397 1398 ret = rtw89_h2c_tx(rtwdev, skb, false); 1399 if (ret) { 1400 rtw89_err(rtwdev, "failed to send h2c\n"); 1401 goto fail; 1402 } 1403 1404 return 0; 1405 fail: 1406 dev_kfree_skb_any(skb); 1407 1408 return ret; 1409 } 1410 1411 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1412 struct rtw89_sta *rtwsta) 1413 { 1414 const struct rtw89_chip_info *chip = rtwdev->chip; 1415 struct sk_buff *skb; 1416 int ret; 1417 1418 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 1419 return 0; 1420 1421 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1422 if (!skb) { 1423 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1424 return -ENOMEM; 1425 } 1426 skb_put(skb, H2C_CMC_TBL_LEN); 1427 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1428 SET_CTRL_INFO_OPERATION(skb->data, 1); 1429 1430 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1431 1432 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1433 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1434 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 1435 H2C_CMC_TBL_LEN); 1436 1437 ret = rtw89_h2c_tx(rtwdev, skb, false); 1438 if (ret) { 1439 rtw89_err(rtwdev, "failed to send h2c\n"); 1440 goto fail; 1441 } 1442 1443 return 0; 1444 fail: 1445 dev_kfree_skb_any(skb); 1446 1447 return ret; 1448 } 1449 1450 #define H2C_BCN_BASE_LEN 12 1451 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 1452 struct rtw89_vif *rtwvif) 1453 { 1454 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 1455 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1456 struct sk_buff *skb; 1457 struct sk_buff *skb_beacon; 1458 u16 tim_offset; 1459 int bcn_total_len; 1460 u16 beacon_rate; 1461 int ret; 1462 1463 if (vif->p2p) 1464 beacon_rate = RTW89_HW_RATE_OFDM6; 1465 else if (chan->band_type == RTW89_BAND_2G) 1466 beacon_rate = RTW89_HW_RATE_CCK1; 1467 else 1468 beacon_rate = RTW89_HW_RATE_OFDM6; 1469 1470 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 1471 NULL, 0); 1472 if (!skb_beacon) { 1473 rtw89_err(rtwdev, "failed to get beacon skb\n"); 1474 return -ENOMEM; 1475 } 1476 1477 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 1478 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 1479 if (!skb) { 1480 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1481 dev_kfree_skb_any(skb_beacon); 1482 return -ENOMEM; 1483 } 1484 skb_put(skb, H2C_BCN_BASE_LEN); 1485 1486 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 1487 SET_BCN_UPD_MBSSID(skb->data, 0); 1488 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 1489 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 1490 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 1491 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 1492 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 1493 SET_BCN_UPD_RATE(skb->data, beacon_rate); 1494 1495 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 1496 dev_kfree_skb_any(skb_beacon); 1497 1498 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1499 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1500 H2C_FUNC_MAC_BCN_UPD, 0, 1, 1501 bcn_total_len); 1502 1503 ret = rtw89_h2c_tx(rtwdev, skb, false); 1504 if (ret) { 1505 rtw89_err(rtwdev, "failed to send h2c\n"); 1506 dev_kfree_skb_any(skb); 1507 return ret; 1508 } 1509 1510 return 0; 1511 } 1512 1513 #define H2C_ROLE_MAINTAIN_LEN 4 1514 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 1515 struct rtw89_vif *rtwvif, 1516 struct rtw89_sta *rtwsta, 1517 enum rtw89_upd_mode upd_mode) 1518 { 1519 struct sk_buff *skb; 1520 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1521 u8 self_role; 1522 int ret; 1523 1524 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 1525 if (rtwsta) 1526 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1527 else 1528 self_role = rtwvif->self_role; 1529 } else { 1530 self_role = rtwvif->self_role; 1531 } 1532 1533 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 1534 if (!skb) { 1535 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1536 return -ENOMEM; 1537 } 1538 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 1539 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 1540 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 1541 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 1542 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1543 1544 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1545 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1546 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 1547 H2C_ROLE_MAINTAIN_LEN); 1548 1549 ret = rtw89_h2c_tx(rtwdev, skb, false); 1550 if (ret) { 1551 rtw89_err(rtwdev, "failed to send h2c\n"); 1552 goto fail; 1553 } 1554 1555 return 0; 1556 fail: 1557 dev_kfree_skb_any(skb); 1558 1559 return ret; 1560 } 1561 1562 #define H2C_JOIN_INFO_LEN 4 1563 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1564 struct rtw89_sta *rtwsta, bool dis_conn) 1565 { 1566 struct sk_buff *skb; 1567 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1568 u8 self_role = rtwvif->self_role; 1569 u8 net_type = rtwvif->net_type; 1570 int ret; 1571 1572 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 1573 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1574 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type; 1575 } 1576 1577 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1578 if (!skb) { 1579 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1580 return -ENOMEM; 1581 } 1582 skb_put(skb, H2C_JOIN_INFO_LEN); 1583 SET_JOININFO_MACID(skb->data, mac_id); 1584 SET_JOININFO_OP(skb->data, dis_conn); 1585 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); 1586 SET_JOININFO_WMM(skb->data, rtwvif->wmm); 1587 SET_JOININFO_TGR(skb->data, rtwvif->trigger); 1588 SET_JOININFO_ISHESTA(skb->data, 0); 1589 SET_JOININFO_DLBW(skb->data, 0); 1590 SET_JOININFO_TF_MAC_PAD(skb->data, 0); 1591 SET_JOININFO_DL_T_PE(skb->data, 0); 1592 SET_JOININFO_PORT_ID(skb->data, rtwvif->port); 1593 SET_JOININFO_NET_TYPE(skb->data, net_type); 1594 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1595 SET_JOININFO_SELF_ROLE(skb->data, self_role); 1596 1597 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1598 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1599 H2C_FUNC_MAC_JOININFO, 0, 1, 1600 H2C_JOIN_INFO_LEN); 1601 1602 ret = rtw89_h2c_tx(rtwdev, skb, false); 1603 if (ret) { 1604 rtw89_err(rtwdev, "failed to send h2c\n"); 1605 goto fail; 1606 } 1607 1608 return 0; 1609 fail: 1610 dev_kfree_skb_any(skb); 1611 1612 return ret; 1613 } 1614 1615 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 1616 bool pause) 1617 { 1618 struct rtw89_fw_macid_pause_grp h2c = {{0}}; 1619 u8 len = sizeof(struct rtw89_fw_macid_pause_grp); 1620 struct sk_buff *skb; 1621 int ret; 1622 1623 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1624 if (!skb) { 1625 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1626 return -ENOMEM; 1627 } 1628 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); 1629 if (pause) 1630 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); 1631 skb_put_data(skb, &h2c, len); 1632 1633 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1634 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1635 H2C_FUNC_MAC_MACID_PAUSE, 1, 0, 1636 len); 1637 1638 ret = rtw89_h2c_tx(rtwdev, skb, false); 1639 if (ret) { 1640 rtw89_err(rtwdev, "failed to send h2c\n"); 1641 goto fail; 1642 } 1643 1644 return 0; 1645 fail: 1646 dev_kfree_skb_any(skb); 1647 1648 return ret; 1649 } 1650 1651 #define H2C_EDCA_LEN 12 1652 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1653 u8 ac, u32 val) 1654 { 1655 struct sk_buff *skb; 1656 int ret; 1657 1658 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 1659 if (!skb) { 1660 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 1661 return -ENOMEM; 1662 } 1663 skb_put(skb, H2C_EDCA_LEN); 1664 RTW89_SET_EDCA_SEL(skb->data, 0); 1665 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 1666 RTW89_SET_EDCA_WMM(skb->data, 0); 1667 RTW89_SET_EDCA_AC(skb->data, ac); 1668 RTW89_SET_EDCA_PARAM(skb->data, val); 1669 1670 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1671 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1672 H2C_FUNC_USR_EDCA, 0, 1, 1673 H2C_EDCA_LEN); 1674 1675 ret = rtw89_h2c_tx(rtwdev, skb, false); 1676 if (ret) { 1677 rtw89_err(rtwdev, "failed to send h2c\n"); 1678 goto fail; 1679 } 1680 1681 return 0; 1682 fail: 1683 dev_kfree_skb_any(skb); 1684 1685 return ret; 1686 } 1687 1688 #define H2C_TSF32_TOGL_LEN 4 1689 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1690 bool en) 1691 { 1692 struct sk_buff *skb; 1693 u16 early_us = en ? 2000 : 0; 1694 u8 *cmd; 1695 int ret; 1696 1697 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 1698 if (!skb) { 1699 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1700 return -ENOMEM; 1701 } 1702 skb_put(skb, H2C_TSF32_TOGL_LEN); 1703 cmd = skb->data; 1704 1705 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 1706 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 1707 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 1708 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 1709 1710 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1711 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1712 H2C_FUNC_TSF32_TOGL, 0, 0, 1713 H2C_TSF32_TOGL_LEN); 1714 1715 ret = rtw89_h2c_tx(rtwdev, skb, false); 1716 if (ret) { 1717 rtw89_err(rtwdev, "failed to send h2c\n"); 1718 goto fail; 1719 } 1720 1721 return 0; 1722 fail: 1723 dev_kfree_skb_any(skb); 1724 1725 return ret; 1726 } 1727 1728 #define H2C_OFLD_CFG_LEN 8 1729 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 1730 { 1731 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 1732 struct sk_buff *skb; 1733 int ret; 1734 1735 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 1736 if (!skb) { 1737 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 1738 return -ENOMEM; 1739 } 1740 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 1741 1742 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1743 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1744 H2C_FUNC_OFLD_CFG, 0, 1, 1745 H2C_OFLD_CFG_LEN); 1746 1747 ret = rtw89_h2c_tx(rtwdev, skb, false); 1748 if (ret) { 1749 rtw89_err(rtwdev, "failed to send h2c\n"); 1750 goto fail; 1751 } 1752 1753 return 0; 1754 fail: 1755 dev_kfree_skb_any(skb); 1756 1757 return ret; 1758 } 1759 1760 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 1761 struct ieee80211_vif *vif, 1762 bool connect) 1763 { 1764 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 1765 struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL; 1766 struct rtw89_h2c_bcnfltr *h2c; 1767 u32 len = sizeof(*h2c); 1768 struct sk_buff *skb; 1769 int ret; 1770 1771 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 1772 return -EINVAL; 1773 1774 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 1775 return -EINVAL; 1776 1777 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1778 if (!skb) { 1779 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 1780 return -ENOMEM; 1781 } 1782 1783 skb_put(skb, len); 1784 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 1785 1786 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 1787 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 1788 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 1789 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 1790 RTW89_H2C_BCNFLTR_W0_MODE) | 1791 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 1792 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 1793 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI, 1794 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 1795 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 1796 1797 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1798 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1799 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 1800 1801 ret = rtw89_h2c_tx(rtwdev, skb, false); 1802 if (ret) { 1803 rtw89_err(rtwdev, "failed to send h2c\n"); 1804 goto fail; 1805 } 1806 1807 return 0; 1808 fail: 1809 dev_kfree_skb_any(skb); 1810 1811 return ret; 1812 } 1813 1814 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 1815 struct rtw89_rx_phy_ppdu *phy_ppdu) 1816 { 1817 struct rtw89_h2c_ofld_rssi *h2c; 1818 u32 len = sizeof(*h2c); 1819 struct sk_buff *skb; 1820 s8 rssi; 1821 int ret; 1822 1823 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 1824 return -EINVAL; 1825 1826 if (!phy_ppdu) 1827 return -EINVAL; 1828 1829 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1830 if (!skb) { 1831 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 1832 return -ENOMEM; 1833 } 1834 1835 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 1836 skb_put(skb, len); 1837 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 1838 1839 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 1840 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 1841 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 1842 1843 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1844 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1845 H2C_FUNC_OFLD_RSSI, 0, 1, len); 1846 1847 ret = rtw89_h2c_tx(rtwdev, skb, false); 1848 if (ret) { 1849 rtw89_err(rtwdev, "failed to send h2c\n"); 1850 goto fail; 1851 } 1852 1853 return 0; 1854 fail: 1855 dev_kfree_skb_any(skb); 1856 1857 return ret; 1858 } 1859 1860 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 1861 { 1862 struct rtw89_traffic_stats *stats = &rtwvif->stats; 1863 struct rtw89_h2c_ofld *h2c; 1864 u32 len = sizeof(*h2c); 1865 struct sk_buff *skb; 1866 int ret; 1867 1868 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 1869 return -EINVAL; 1870 1871 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1872 if (!skb) { 1873 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 1874 return -ENOMEM; 1875 } 1876 1877 skb_put(skb, len); 1878 h2c = (struct rtw89_h2c_ofld *)skb->data; 1879 1880 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 1881 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 1882 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 1883 1884 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1885 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1886 H2C_FUNC_OFLD_TP, 0, 1, len); 1887 1888 ret = rtw89_h2c_tx(rtwdev, skb, false); 1889 if (ret) { 1890 rtw89_err(rtwdev, "failed to send h2c\n"); 1891 goto fail; 1892 } 1893 1894 return 0; 1895 fail: 1896 dev_kfree_skb_any(skb); 1897 1898 return ret; 1899 } 1900 1901 #define H2C_RA_LEN 16 1902 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 1903 { 1904 struct sk_buff *skb; 1905 u8 *cmd; 1906 int ret; 1907 1908 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN); 1909 if (!skb) { 1910 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1911 return -ENOMEM; 1912 } 1913 skb_put(skb, H2C_RA_LEN); 1914 cmd = skb->data; 1915 rtw89_debug(rtwdev, RTW89_DBG_RA, 1916 "ra cmd msk: %llx ", ra->ra_mask); 1917 1918 RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); 1919 RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); 1920 RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); 1921 RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); 1922 RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); 1923 RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); 1924 RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); 1925 RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); 1926 RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap); 1927 RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); 1928 RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); 1929 RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); 1930 RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); 1931 RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); 1932 RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); 1933 RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); 1934 RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); 1935 RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); 1936 RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); 1937 RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en); 1938 RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf); 1939 1940 if (csi) { 1941 RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); 1942 RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); 1943 RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); 1944 RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); 1945 RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); 1946 RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); 1947 RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); 1948 RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); 1949 RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); 1950 } 1951 1952 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1953 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 1954 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 1955 H2C_RA_LEN); 1956 1957 ret = rtw89_h2c_tx(rtwdev, skb, false); 1958 if (ret) { 1959 rtw89_err(rtwdev, "failed to send h2c\n"); 1960 goto fail; 1961 } 1962 1963 return 0; 1964 fail: 1965 dev_kfree_skb_any(skb); 1966 1967 return ret; 1968 } 1969 1970 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 1971 { 1972 struct rtw89_btc *btc = &rtwdev->btc; 1973 struct rtw89_btc_dm *dm = &btc->dm; 1974 struct rtw89_btc_init_info *init_info = &dm->init_info; 1975 struct rtw89_btc_module *module = &init_info->module; 1976 struct rtw89_btc_ant_info *ant = &module->ant; 1977 struct rtw89_h2c_cxinit *h2c; 1978 u32 len = sizeof(*h2c); 1979 struct sk_buff *skb; 1980 int ret; 1981 1982 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1983 if (!skb) { 1984 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 1985 return -ENOMEM; 1986 } 1987 skb_put(skb, len); 1988 h2c = (struct rtw89_h2c_cxinit *)skb->data; 1989 1990 h2c->hdr.type = CXDRVINFO_INIT; 1991 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 1992 1993 h2c->ant_type = ant->type; 1994 h2c->ant_num = ant->num; 1995 h2c->ant_iso = ant->isolation; 1996 h2c->ant_info = 1997 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 1998 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 1999 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 2000 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 2001 2002 h2c->mod_rfe = module->rfe_type; 2003 h2c->mod_cv = module->cv; 2004 h2c->mod_info = 2005 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 2006 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 2007 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 2008 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 2009 h2c->mod_adie_kt = module->kt_ver_adie; 2010 h2c->wl_gch = init_info->wl_guard_ch; 2011 2012 h2c->info = 2013 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 2014 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 2015 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 2016 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 2017 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 2018 2019 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2020 H2C_CAT_OUTSRC, BTFC_SET, 2021 SET_DRV_INFO, 0, 0, 2022 len); 2023 2024 ret = rtw89_h2c_tx(rtwdev, skb, false); 2025 if (ret) { 2026 rtw89_err(rtwdev, "failed to send h2c\n"); 2027 goto fail; 2028 } 2029 2030 return 0; 2031 fail: 2032 dev_kfree_skb_any(skb); 2033 2034 return ret; 2035 } 2036 2037 #define PORT_DATA_OFFSET 4 2038 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 2039 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 2040 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 2041 2042 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 2043 { 2044 struct rtw89_btc *btc = &rtwdev->btc; 2045 const struct rtw89_btc_ver *ver = btc->ver; 2046 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2047 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 2048 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2049 struct rtw89_btc_wl_active_role *active = role_info->active_role; 2050 struct sk_buff *skb; 2051 u32 len; 2052 u8 offset = 0; 2053 u8 *cmd; 2054 int ret; 2055 int i; 2056 2057 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 2058 2059 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2060 if (!skb) { 2061 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2062 return -ENOMEM; 2063 } 2064 skb_put(skb, len); 2065 cmd = skb->data; 2066 2067 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2068 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2069 2070 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2071 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2072 2073 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2074 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2075 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2076 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2077 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2078 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2079 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2080 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2081 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2082 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2083 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2084 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2085 2086 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2087 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2088 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2089 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2090 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2091 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2092 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2093 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2094 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2095 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2096 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2097 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2098 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2099 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2100 } 2101 2102 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2103 H2C_CAT_OUTSRC, BTFC_SET, 2104 SET_DRV_INFO, 0, 0, 2105 len); 2106 2107 ret = rtw89_h2c_tx(rtwdev, skb, false); 2108 if (ret) { 2109 rtw89_err(rtwdev, "failed to send h2c\n"); 2110 goto fail; 2111 } 2112 2113 return 0; 2114 fail: 2115 dev_kfree_skb_any(skb); 2116 2117 return ret; 2118 } 2119 2120 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 2121 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2122 2123 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 2124 { 2125 struct rtw89_btc *btc = &rtwdev->btc; 2126 const struct rtw89_btc_ver *ver = btc->ver; 2127 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2128 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 2129 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2130 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 2131 struct sk_buff *skb; 2132 u32 len; 2133 u8 *cmd, offset; 2134 int ret; 2135 int i; 2136 2137 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 2138 2139 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2140 if (!skb) { 2141 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2142 return -ENOMEM; 2143 } 2144 skb_put(skb, len); 2145 cmd = skb->data; 2146 2147 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2148 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2149 2150 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2151 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2152 2153 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2154 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2155 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2156 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2157 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2158 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2159 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2160 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2161 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2162 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2163 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2164 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2165 2166 offset = PORT_DATA_OFFSET; 2167 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2168 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2169 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2170 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2171 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2172 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2173 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2174 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2175 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2176 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2177 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2178 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2179 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2180 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2181 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2182 } 2183 2184 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2185 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2186 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2187 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2188 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2189 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2190 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2191 2192 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2193 H2C_CAT_OUTSRC, BTFC_SET, 2194 SET_DRV_INFO, 0, 0, 2195 len); 2196 2197 ret = rtw89_h2c_tx(rtwdev, skb, false); 2198 if (ret) { 2199 rtw89_err(rtwdev, "failed to send h2c\n"); 2200 goto fail; 2201 } 2202 2203 return 0; 2204 fail: 2205 dev_kfree_skb_any(skb); 2206 2207 return ret; 2208 } 2209 2210 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 2211 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2212 2213 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev) 2214 { 2215 struct rtw89_btc *btc = &rtwdev->btc; 2216 const struct rtw89_btc_ver *ver = btc->ver; 2217 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2218 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 2219 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2220 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 2221 struct sk_buff *skb; 2222 u32 len; 2223 u8 *cmd, offset; 2224 int ret; 2225 int i; 2226 2227 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 2228 2229 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2230 if (!skb) { 2231 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2232 return -ENOMEM; 2233 } 2234 skb_put(skb, len); 2235 cmd = skb->data; 2236 2237 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2238 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2239 2240 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2241 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2242 2243 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2244 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2245 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2246 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2247 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2248 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2249 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2250 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2251 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2252 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2253 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2254 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2255 2256 offset = PORT_DATA_OFFSET; 2257 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2258 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 2259 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 2260 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 2261 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 2262 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 2263 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 2264 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 2265 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 2266 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 2267 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 2268 } 2269 2270 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2271 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2272 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2273 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2274 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2275 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2276 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2277 2278 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2279 H2C_CAT_OUTSRC, BTFC_SET, 2280 SET_DRV_INFO, 0, 0, 2281 len); 2282 2283 ret = rtw89_h2c_tx(rtwdev, skb, false); 2284 if (ret) { 2285 rtw89_err(rtwdev, "failed to send h2c\n"); 2286 goto fail; 2287 } 2288 2289 return 0; 2290 fail: 2291 dev_kfree_skb_any(skb); 2292 2293 return ret; 2294 } 2295 2296 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2297 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2298 { 2299 struct rtw89_btc *btc = &rtwdev->btc; 2300 const struct rtw89_btc_ver *ver = btc->ver; 2301 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2302 struct sk_buff *skb; 2303 u8 *cmd; 2304 int ret; 2305 2306 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 2307 if (!skb) { 2308 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2309 return -ENOMEM; 2310 } 2311 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 2312 cmd = skb->data; 2313 2314 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 2315 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 2316 2317 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 2318 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 2319 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 2320 if (ver->fcxctrl == 0) 2321 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 2322 2323 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2324 H2C_CAT_OUTSRC, BTFC_SET, 2325 SET_DRV_INFO, 0, 0, 2326 H2C_LEN_CXDRVINFO_CTRL); 2327 2328 ret = rtw89_h2c_tx(rtwdev, skb, false); 2329 if (ret) { 2330 rtw89_err(rtwdev, "failed to send h2c\n"); 2331 goto fail; 2332 } 2333 2334 return 0; 2335 fail: 2336 dev_kfree_skb_any(skb); 2337 2338 return ret; 2339 } 2340 2341 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 2342 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev) 2343 { 2344 struct rtw89_btc *btc = &rtwdev->btc; 2345 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 2346 struct sk_buff *skb; 2347 u8 *cmd; 2348 int ret; 2349 2350 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 2351 if (!skb) { 2352 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 2353 return -ENOMEM; 2354 } 2355 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 2356 cmd = skb->data; 2357 2358 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX); 2359 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 2360 2361 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 2362 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 2363 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 2364 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 2365 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 2366 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 2367 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 2368 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 2369 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 2370 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 2371 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 2372 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 2373 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 2374 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 2375 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 2376 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 2377 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 2378 2379 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2380 H2C_CAT_OUTSRC, BTFC_SET, 2381 SET_DRV_INFO, 0, 0, 2382 H2C_LEN_CXDRVINFO_TRX); 2383 2384 ret = rtw89_h2c_tx(rtwdev, skb, false); 2385 if (ret) { 2386 rtw89_err(rtwdev, "failed to send h2c\n"); 2387 goto fail; 2388 } 2389 2390 return 0; 2391 fail: 2392 dev_kfree_skb_any(skb); 2393 2394 return ret; 2395 } 2396 2397 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 2398 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) 2399 { 2400 struct rtw89_btc *btc = &rtwdev->btc; 2401 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2402 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 2403 struct sk_buff *skb; 2404 u8 *cmd; 2405 int ret; 2406 2407 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 2408 if (!skb) { 2409 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2410 return -ENOMEM; 2411 } 2412 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 2413 cmd = skb->data; 2414 2415 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); 2416 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 2417 2418 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 2419 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 2420 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 2421 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 2422 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 2423 2424 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2425 H2C_CAT_OUTSRC, BTFC_SET, 2426 SET_DRV_INFO, 0, 0, 2427 H2C_LEN_CXDRVINFO_RFK); 2428 2429 ret = rtw89_h2c_tx(rtwdev, skb, false); 2430 if (ret) { 2431 rtw89_err(rtwdev, "failed to send h2c\n"); 2432 goto fail; 2433 } 2434 2435 return 0; 2436 fail: 2437 dev_kfree_skb_any(skb); 2438 2439 return ret; 2440 } 2441 2442 #define H2C_LEN_PKT_OFLD 4 2443 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 2444 { 2445 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2446 struct sk_buff *skb; 2447 unsigned int cond; 2448 u8 *cmd; 2449 int ret; 2450 2451 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 2452 if (!skb) { 2453 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2454 return -ENOMEM; 2455 } 2456 skb_put(skb, H2C_LEN_PKT_OFLD); 2457 cmd = skb->data; 2458 2459 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 2460 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 2461 2462 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2463 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2464 H2C_FUNC_PACKET_OFLD, 1, 1, 2465 H2C_LEN_PKT_OFLD); 2466 2467 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 2468 2469 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2470 if (ret) { 2471 rtw89_debug(rtwdev, RTW89_DBG_FW, 2472 "failed to del pkt ofld: id %d, ret %d\n", 2473 id, ret); 2474 return ret; 2475 } 2476 2477 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 2478 return 0; 2479 } 2480 2481 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 2482 struct sk_buff *skb_ofld) 2483 { 2484 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2485 struct sk_buff *skb; 2486 unsigned int cond; 2487 u8 *cmd; 2488 u8 alloc_id; 2489 int ret; 2490 2491 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2492 RTW89_MAX_PKT_OFLD_NUM); 2493 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2494 return -ENOSPC; 2495 2496 *id = alloc_id; 2497 2498 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2499 if (!skb) { 2500 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2501 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2502 return -ENOMEM; 2503 } 2504 skb_put(skb, H2C_LEN_PKT_OFLD); 2505 cmd = skb->data; 2506 2507 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2508 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2509 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 2510 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 2511 2512 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2513 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2514 H2C_FUNC_PACKET_OFLD, 1, 1, 2515 H2C_LEN_PKT_OFLD + skb_ofld->len); 2516 2517 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 2518 2519 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2520 if (ret) { 2521 rtw89_debug(rtwdev, RTW89_DBG_FW, 2522 "failed to add pkt ofld: id %d, ret %d\n", 2523 alloc_id, ret); 2524 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2525 return ret; 2526 } 2527 2528 return 0; 2529 } 2530 2531 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 2532 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 2533 struct list_head *chan_list) 2534 { 2535 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2536 struct rtw89_mac_chinfo *ch_info; 2537 struct sk_buff *skb; 2538 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 2539 unsigned int cond; 2540 u8 *cmd; 2541 int ret; 2542 2543 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 2544 if (!skb) { 2545 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 2546 return -ENOMEM; 2547 } 2548 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 2549 cmd = skb->data; 2550 2551 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 2552 /* in unit of 4 bytes */ 2553 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 2554 2555 list_for_each_entry(ch_info, chan_list, list) { 2556 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 2557 2558 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 2559 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 2560 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 2561 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 2562 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 2563 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 2564 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 2565 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 2566 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 2567 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 2568 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 2569 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 2570 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 2571 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 2572 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 2573 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 2574 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 2575 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 2576 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 2577 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 2578 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 2579 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 2580 } 2581 2582 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2583 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2584 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 2585 2586 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH); 2587 2588 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2589 if (ret) { 2590 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 2591 return ret; 2592 } 2593 2594 return 0; 2595 } 2596 2597 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 2598 struct rtw89_scan_option *option, 2599 struct rtw89_vif *rtwvif) 2600 { 2601 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2602 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 2603 struct rtw89_h2c_scanofld *h2c; 2604 u32 len = sizeof(*h2c); 2605 struct sk_buff *skb; 2606 unsigned int cond; 2607 int ret; 2608 2609 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2610 if (!skb) { 2611 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 2612 return -ENOMEM; 2613 } 2614 skb_put(skb, len); 2615 h2c = (struct rtw89_h2c_scanofld *)skb->data; 2616 2617 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 2618 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 2619 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 2620 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 2621 2622 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 2623 le32_encode_bits(option->target_ch_mode, 2624 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 2625 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 2626 RTW89_H2C_SCANOFLD_W1_START_MODE) | 2627 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 2628 2629 if (option->target_ch_mode) { 2630 h2c->w1 |= le32_encode_bits(op->band_width, 2631 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 2632 le32_encode_bits(op->primary_channel, 2633 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 2634 le32_encode_bits(op->channel, 2635 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 2636 h2c->w0 |= le32_encode_bits(op->band_type, 2637 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 2638 } 2639 2640 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2641 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2642 H2C_FUNC_SCANOFLD, 1, 1, 2643 len); 2644 2645 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD); 2646 2647 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2648 if (ret) { 2649 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 2650 return ret; 2651 } 2652 2653 return 0; 2654 } 2655 2656 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 2657 struct rtw89_fw_h2c_rf_reg_info *info, 2658 u16 len, u8 page) 2659 { 2660 struct sk_buff *skb; 2661 u8 class = info->rf_path == RF_PATH_A ? 2662 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 2663 int ret; 2664 2665 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2666 if (!skb) { 2667 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 2668 return -ENOMEM; 2669 } 2670 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 2671 2672 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2673 H2C_CAT_OUTSRC, class, page, 0, 0, 2674 len); 2675 2676 ret = rtw89_h2c_tx(rtwdev, skb, false); 2677 if (ret) { 2678 rtw89_err(rtwdev, "failed to send h2c\n"); 2679 goto fail; 2680 } 2681 2682 return 0; 2683 fail: 2684 dev_kfree_skb_any(skb); 2685 2686 return ret; 2687 } 2688 2689 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 2690 { 2691 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2692 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 2693 struct rtw89_fw_h2c_rf_get_mccch *mccch; 2694 struct sk_buff *skb; 2695 int ret; 2696 2697 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 2698 if (!skb) { 2699 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2700 return -ENOMEM; 2701 } 2702 skb_put(skb, sizeof(*mccch)); 2703 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 2704 2705 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 2706 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 2707 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 2708 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 2709 mccch->current_channel = cpu_to_le32(chan->channel); 2710 mccch->current_band_type = cpu_to_le32(chan->band_type); 2711 2712 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2713 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 2714 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 2715 sizeof(*mccch)); 2716 2717 ret = rtw89_h2c_tx(rtwdev, skb, false); 2718 if (ret) { 2719 rtw89_err(rtwdev, "failed to send h2c\n"); 2720 goto fail; 2721 } 2722 2723 return 0; 2724 fail: 2725 dev_kfree_skb_any(skb); 2726 2727 return ret; 2728 } 2729 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 2730 2731 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 2732 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 2733 bool rack, bool dack) 2734 { 2735 struct sk_buff *skb; 2736 int ret; 2737 2738 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2739 if (!skb) { 2740 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 2741 return -ENOMEM; 2742 } 2743 skb_put_data(skb, buf, len); 2744 2745 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2746 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 2747 len); 2748 2749 ret = rtw89_h2c_tx(rtwdev, skb, false); 2750 if (ret) { 2751 rtw89_err(rtwdev, "failed to send h2c\n"); 2752 goto fail; 2753 } 2754 2755 return 0; 2756 fail: 2757 dev_kfree_skb_any(skb); 2758 2759 return ret; 2760 } 2761 2762 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 2763 { 2764 struct sk_buff *skb; 2765 int ret; 2766 2767 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 2768 if (!skb) { 2769 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 2770 return -ENOMEM; 2771 } 2772 skb_put_data(skb, buf, len); 2773 2774 ret = rtw89_h2c_tx(rtwdev, skb, false); 2775 if (ret) { 2776 rtw89_err(rtwdev, "failed to send h2c\n"); 2777 goto fail; 2778 } 2779 2780 return 0; 2781 fail: 2782 dev_kfree_skb_any(skb); 2783 2784 return ret; 2785 } 2786 2787 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 2788 { 2789 struct rtw89_early_h2c *early_h2c; 2790 2791 lockdep_assert_held(&rtwdev->mutex); 2792 2793 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 2794 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 2795 } 2796 } 2797 2798 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 2799 { 2800 struct rtw89_early_h2c *early_h2c, *tmp; 2801 2802 mutex_lock(&rtwdev->mutex); 2803 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 2804 list_del(&early_h2c->list); 2805 kfree(early_h2c->h2c); 2806 kfree(early_h2c); 2807 } 2808 mutex_unlock(&rtwdev->mutex); 2809 } 2810 2811 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 2812 { 2813 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2814 2815 attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); 2816 attr->class = RTW89_GET_C2H_CLASS(c2h->data); 2817 attr->func = RTW89_GET_C2H_FUNC(c2h->data); 2818 attr->len = RTW89_GET_C2H_LEN(c2h->data); 2819 } 2820 2821 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 2822 struct sk_buff *c2h) 2823 { 2824 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2825 u8 category = attr->category; 2826 u8 class = attr->class; 2827 u8 func = attr->func; 2828 2829 switch (category) { 2830 default: 2831 return false; 2832 case RTW89_C2H_CAT_MAC: 2833 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 2834 } 2835 } 2836 2837 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 2838 { 2839 rtw89_fw_c2h_parse_attr(c2h); 2840 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 2841 goto enqueue; 2842 2843 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 2844 dev_kfree_skb_any(c2h); 2845 return; 2846 2847 enqueue: 2848 skb_queue_tail(&rtwdev->c2h_queue, c2h); 2849 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 2850 } 2851 2852 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 2853 struct sk_buff *skb) 2854 { 2855 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 2856 u8 category = attr->category; 2857 u8 class = attr->class; 2858 u8 func = attr->func; 2859 u16 len = attr->len; 2860 bool dump = true; 2861 2862 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2863 return; 2864 2865 switch (category) { 2866 case RTW89_C2H_CAT_TEST: 2867 break; 2868 case RTW89_C2H_CAT_MAC: 2869 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 2870 if (class == RTW89_MAC_C2H_CLASS_INFO && 2871 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 2872 dump = false; 2873 break; 2874 case RTW89_C2H_CAT_OUTSRC: 2875 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 2876 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 2877 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 2878 else 2879 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 2880 break; 2881 } 2882 2883 if (dump) 2884 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 2885 } 2886 2887 void rtw89_fw_c2h_work(struct work_struct *work) 2888 { 2889 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2890 c2h_work); 2891 struct sk_buff *skb, *tmp; 2892 2893 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 2894 skb_unlink(skb, &rtwdev->c2h_queue); 2895 mutex_lock(&rtwdev->mutex); 2896 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 2897 mutex_unlock(&rtwdev->mutex); 2898 dev_kfree_skb_any(skb); 2899 } 2900 } 2901 2902 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 2903 struct rtw89_mac_h2c_info *info) 2904 { 2905 const struct rtw89_chip_info *chip = rtwdev->chip; 2906 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2907 const u32 *h2c_reg = chip->h2c_regs; 2908 u8 i, val, len; 2909 int ret; 2910 2911 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 2912 rtwdev, chip->h2c_ctrl_reg); 2913 if (ret) { 2914 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 2915 return ret; 2916 } 2917 2918 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 2919 sizeof(info->h2creg[0])); 2920 2921 RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id); 2922 RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len); 2923 for (i = 0; i < RTW89_H2CREG_MAX; i++) 2924 rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]); 2925 2926 fw_info->h2c_counter++; 2927 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 2928 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 2929 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 2930 2931 return 0; 2932 } 2933 2934 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 2935 struct rtw89_mac_c2h_info *info) 2936 { 2937 const struct rtw89_chip_info *chip = rtwdev->chip; 2938 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2939 const u32 *c2h_reg = chip->c2h_regs; 2940 u32 ret; 2941 u8 i, val; 2942 2943 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 2944 2945 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 2946 RTW89_C2H_TIMEOUT, false, rtwdev, 2947 chip->c2h_ctrl_reg); 2948 if (ret) { 2949 rtw89_warn(rtwdev, "c2h reg timeout\n"); 2950 return ret; 2951 } 2952 2953 for (i = 0; i < RTW89_C2HREG_MAX; i++) 2954 info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 2955 2956 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 2957 2958 info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg); 2959 info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) - 2960 RTW89_C2HREG_HDR_LEN; 2961 2962 fw_info->c2h_counter++; 2963 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 2964 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 2965 2966 return 0; 2967 } 2968 2969 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 2970 struct rtw89_mac_h2c_info *h2c_info, 2971 struct rtw89_mac_c2h_info *c2h_info) 2972 { 2973 u32 ret; 2974 2975 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 2976 lockdep_assert_held(&rtwdev->mutex); 2977 2978 if (!h2c_info && !c2h_info) 2979 return -EINVAL; 2980 2981 if (!h2c_info) 2982 goto recv_c2h; 2983 2984 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 2985 if (ret) 2986 return ret; 2987 2988 recv_c2h: 2989 if (!c2h_info) 2990 return 0; 2991 2992 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 2993 if (ret) 2994 return ret; 2995 2996 return 0; 2997 } 2998 2999 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 3000 { 3001 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 3002 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 3003 return; 3004 } 3005 3006 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 3007 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 3008 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 3009 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 3010 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 3011 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 3012 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 3013 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 3014 3015 rtw89_fw_prog_cnt_dump(rtwdev); 3016 } 3017 3018 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 3019 { 3020 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3021 struct rtw89_pktofld_info *info, *tmp; 3022 u8 idx; 3023 3024 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 3025 if (!(rtwdev->chip->support_bands & BIT(idx))) 3026 continue; 3027 3028 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 3029 if (test_bit(info->id, rtwdev->pkt_offload)) 3030 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 3031 list_del(&info->list); 3032 kfree(info); 3033 } 3034 } 3035 } 3036 3037 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 3038 struct rtw89_vif *rtwvif, 3039 struct rtw89_pktofld_info *info, 3040 enum nl80211_band band, u8 ssid_idx) 3041 { 3042 struct cfg80211_scan_request *req = rtwvif->scan_req; 3043 3044 if (band != NL80211_BAND_6GHZ) 3045 return false; 3046 3047 if (req->ssids[ssid_idx].ssid_len) { 3048 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 3049 req->ssids[ssid_idx].ssid_len); 3050 info->ssid_len = req->ssids[ssid_idx].ssid_len; 3051 return false; 3052 } else { 3053 return true; 3054 } 3055 } 3056 3057 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 3058 struct rtw89_vif *rtwvif, 3059 struct sk_buff *skb, u8 ssid_idx) 3060 { 3061 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3062 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3063 struct rtw89_pktofld_info *info; 3064 struct sk_buff *new; 3065 int ret = 0; 3066 u8 band; 3067 3068 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 3069 if (!(rtwdev->chip->support_bands & BIT(band))) 3070 continue; 3071 3072 new = skb_copy(skb, GFP_KERNEL); 3073 if (!new) { 3074 ret = -ENOMEM; 3075 goto out; 3076 } 3077 skb_put_data(new, ies->ies[band], ies->len[band]); 3078 skb_put_data(new, ies->common_ies, ies->common_ie_len); 3079 3080 info = kzalloc(sizeof(*info), GFP_KERNEL); 3081 if (!info) { 3082 ret = -ENOMEM; 3083 kfree_skb(new); 3084 goto out; 3085 } 3086 3087 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 3088 ssid_idx)) { 3089 kfree_skb(new); 3090 kfree(info); 3091 goto out; 3092 } 3093 3094 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 3095 if (ret) { 3096 kfree_skb(new); 3097 kfree(info); 3098 goto out; 3099 } 3100 3101 list_add_tail(&info->list, &scan_info->pkt_list[band]); 3102 kfree_skb(new); 3103 } 3104 out: 3105 return ret; 3106 } 3107 3108 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 3109 struct rtw89_vif *rtwvif) 3110 { 3111 struct cfg80211_scan_request *req = rtwvif->scan_req; 3112 struct sk_buff *skb; 3113 u8 num = req->n_ssids, i; 3114 int ret; 3115 3116 for (i = 0; i < num; i++) { 3117 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3118 req->ssids[i].ssid, 3119 req->ssids[i].ssid_len, 3120 req->ie_len); 3121 if (!skb) 3122 return -ENOMEM; 3123 3124 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 3125 kfree_skb(skb); 3126 3127 if (ret) 3128 return ret; 3129 } 3130 3131 return 0; 3132 } 3133 3134 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 3135 struct cfg80211_scan_request *req, 3136 struct rtw89_mac_chinfo *ch_info) 3137 { 3138 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3139 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3140 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3141 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3142 struct cfg80211_scan_6ghz_params *params; 3143 struct rtw89_pktofld_info *info, *tmp; 3144 struct ieee80211_hdr *hdr; 3145 struct sk_buff *skb; 3146 bool found; 3147 int ret = 0; 3148 u8 i; 3149 3150 if (!req->n_6ghz_params) 3151 return 0; 3152 3153 for (i = 0; i < req->n_6ghz_params; i++) { 3154 params = &req->scan_6ghz_params[i]; 3155 3156 if (req->channels[params->channel_idx]->hw_value != 3157 ch_info->pri_ch) 3158 continue; 3159 3160 found = false; 3161 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 3162 if (ether_addr_equal(tmp->bssid, params->bssid)) { 3163 found = true; 3164 break; 3165 } 3166 } 3167 if (found) 3168 continue; 3169 3170 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3171 NULL, 0, req->ie_len); 3172 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 3173 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 3174 hdr = (struct ieee80211_hdr *)skb->data; 3175 ether_addr_copy(hdr->addr3, params->bssid); 3176 3177 info = kzalloc(sizeof(*info), GFP_KERNEL); 3178 if (!info) { 3179 ret = -ENOMEM; 3180 kfree_skb(skb); 3181 goto out; 3182 } 3183 3184 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 3185 if (ret) { 3186 kfree_skb(skb); 3187 kfree(info); 3188 goto out; 3189 } 3190 3191 ether_addr_copy(info->bssid, params->bssid); 3192 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 3193 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 3194 3195 ch_info->tx_pkt = true; 3196 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 3197 3198 kfree_skb(skb); 3199 } 3200 3201 out: 3202 return ret; 3203 } 3204 3205 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 3206 int ssid_num, 3207 struct rtw89_mac_chinfo *ch_info) 3208 { 3209 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3210 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3211 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3212 struct cfg80211_scan_request *req = rtwvif->scan_req; 3213 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3214 struct rtw89_pktofld_info *info; 3215 u8 band, probe_count = 0; 3216 int ret; 3217 3218 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 3219 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 3220 ch_info->bw = RTW89_SCAN_WIDTH; 3221 ch_info->tx_pkt = true; 3222 ch_info->cfg_tx_pwr = false; 3223 ch_info->tx_pwr_idx = 0; 3224 ch_info->tx_null = false; 3225 ch_info->pause_data = false; 3226 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 3227 3228 if (ch_info->ch_band == RTW89_BAND_6G) { 3229 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 3230 !ch_info->is_psc) { 3231 ch_info->tx_pkt = false; 3232 if (!req->duration_mandatory) 3233 ch_info->period -= RTW89_DWELL_TIME_6G; 3234 } 3235 } 3236 3237 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 3238 if (ret) 3239 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 3240 3241 if (ssid_num) { 3242 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 3243 3244 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 3245 if (info->channel_6ghz && 3246 ch_info->pri_ch != info->channel_6ghz) 3247 continue; 3248 ch_info->pkt_id[probe_count++] = info->id; 3249 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 3250 break; 3251 } 3252 ch_info->num_pkt = probe_count; 3253 } 3254 3255 switch (chan_type) { 3256 case RTW89_CHAN_OPERATE: 3257 ch_info->central_ch = op->channel; 3258 ch_info->pri_ch = op->primary_channel; 3259 ch_info->ch_band = op->band_type; 3260 ch_info->bw = op->band_width; 3261 ch_info->tx_null = true; 3262 ch_info->num_pkt = 0; 3263 break; 3264 case RTW89_CHAN_DFS: 3265 if (ch_info->ch_band != RTW89_BAND_6G) 3266 ch_info->period = max_t(u8, ch_info->period, 3267 RTW89_DFS_CHAN_TIME); 3268 ch_info->dwell_time = RTW89_DWELL_TIME; 3269 break; 3270 case RTW89_CHAN_ACTIVE: 3271 break; 3272 default: 3273 rtw89_err(rtwdev, "Channel type out of bound\n"); 3274 } 3275 } 3276 3277 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 3278 struct rtw89_vif *rtwvif, bool connected) 3279 { 3280 struct cfg80211_scan_request *req = rtwvif->scan_req; 3281 struct rtw89_mac_chinfo *ch_info, *tmp; 3282 struct ieee80211_channel *channel; 3283 struct list_head chan_list; 3284 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 3285 int list_len, off_chan_time = 0; 3286 enum rtw89_chan_type type; 3287 int ret = 0; 3288 u32 idx; 3289 3290 INIT_LIST_HEAD(&chan_list); 3291 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 3292 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 3293 idx++, list_len++) { 3294 channel = req->channels[idx]; 3295 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 3296 if (!ch_info) { 3297 ret = -ENOMEM; 3298 goto out; 3299 } 3300 3301 if (req->duration_mandatory) 3302 ch_info->period = req->duration; 3303 else if (channel->band == NL80211_BAND_6GHZ) 3304 ch_info->period = RTW89_CHANNEL_TIME_6G + 3305 RTW89_DWELL_TIME_6G; 3306 else 3307 ch_info->period = RTW89_CHANNEL_TIME; 3308 3309 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 3310 ch_info->central_ch = channel->hw_value; 3311 ch_info->pri_ch = channel->hw_value; 3312 ch_info->rand_seq_num = random_seq; 3313 ch_info->is_psc = cfg80211_channel_is_psc(channel); 3314 3315 if (channel->flags & 3316 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 3317 type = RTW89_CHAN_DFS; 3318 else 3319 type = RTW89_CHAN_ACTIVE; 3320 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 3321 3322 if (connected && 3323 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 3324 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3325 if (!tmp) { 3326 ret = -ENOMEM; 3327 kfree(ch_info); 3328 goto out; 3329 } 3330 3331 type = RTW89_CHAN_OPERATE; 3332 tmp->period = req->duration_mandatory ? 3333 req->duration : RTW89_CHANNEL_TIME; 3334 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 3335 list_add_tail(&tmp->list, &chan_list); 3336 off_chan_time = 0; 3337 list_len++; 3338 } 3339 list_add_tail(&ch_info->list, &chan_list); 3340 off_chan_time += ch_info->period; 3341 } 3342 rtwdev->scan_info.last_chan_idx = idx; 3343 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 3344 3345 out: 3346 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 3347 list_del(&ch_info->list); 3348 kfree(ch_info); 3349 } 3350 3351 return ret; 3352 } 3353 3354 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 3355 struct rtw89_vif *rtwvif, bool connected) 3356 { 3357 int ret; 3358 3359 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 3360 if (ret) { 3361 rtw89_err(rtwdev, "Update probe request failed\n"); 3362 goto out; 3363 } 3364 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected); 3365 out: 3366 return ret; 3367 } 3368 3369 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3370 struct ieee80211_scan_request *scan_req) 3371 { 3372 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3373 struct cfg80211_scan_request *req = &scan_req->req; 3374 u32 rx_fltr = rtwdev->hal.rx_fltr; 3375 u8 mac_addr[ETH_ALEN]; 3376 3377 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 3378 rtwdev->scan_info.scanning_vif = vif; 3379 rtwdev->scan_info.last_chan_idx = 0; 3380 rtwvif->scan_ies = &scan_req->ies; 3381 rtwvif->scan_req = req; 3382 ieee80211_stop_queues(rtwdev->hw); 3383 3384 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 3385 get_random_mask_addr(mac_addr, req->mac_addr, 3386 req->mac_addr_mask); 3387 else 3388 ether_addr_copy(mac_addr, vif->addr); 3389 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 3390 3391 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 3392 rx_fltr &= ~B_AX_A_BC; 3393 rx_fltr &= ~B_AX_A_A1_MATCH; 3394 rtw89_write32_mask(rtwdev, 3395 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3396 B_AX_RX_FLTR_CFG_MASK, 3397 rx_fltr); 3398 } 3399 3400 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3401 bool aborted) 3402 { 3403 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3404 struct cfg80211_scan_info info = { 3405 .aborted = aborted, 3406 }; 3407 struct rtw89_vif *rtwvif; 3408 3409 if (!vif) 3410 return; 3411 3412 rtw89_write32_mask(rtwdev, 3413 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3414 B_AX_RX_FLTR_CFG_MASK, 3415 rtwdev->hal.rx_fltr); 3416 3417 rtw89_core_scan_complete(rtwdev, vif, true); 3418 ieee80211_scan_completed(rtwdev->hw, &info); 3419 ieee80211_wake_queues(rtwdev->hw); 3420 3421 rtw89_release_pkt_list(rtwdev); 3422 rtwvif = (struct rtw89_vif *)vif->drv_priv; 3423 rtwvif->scan_req = NULL; 3424 rtwvif->scan_ies = NULL; 3425 scan_info->last_chan_idx = 0; 3426 scan_info->scanning_vif = NULL; 3427 3428 rtw89_set_channel(rtwdev); 3429 } 3430 3431 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 3432 { 3433 rtw89_hw_scan_offload(rtwdev, vif, false); 3434 rtw89_hw_scan_complete(rtwdev, vif, true); 3435 } 3436 3437 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3438 bool enable) 3439 { 3440 struct rtw89_scan_option opt = {0}; 3441 struct rtw89_vif *rtwvif; 3442 bool connected; 3443 int ret = 0; 3444 3445 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 3446 if (!rtwvif) 3447 return -EINVAL; 3448 3449 /* This variable implies connected or during attempt to connect */ 3450 connected = !is_zero_ether_addr(rtwvif->bssid); 3451 opt.enable = enable; 3452 opt.target_ch_mode = connected; 3453 if (enable) { 3454 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 3455 if (ret) 3456 goto out; 3457 } 3458 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 3459 out: 3460 return ret; 3461 } 3462 3463 #define H2C_FW_CPU_EXCEPTION_LEN 4 3464 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 3465 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 3466 { 3467 struct sk_buff *skb; 3468 int ret; 3469 3470 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 3471 if (!skb) { 3472 rtw89_err(rtwdev, 3473 "failed to alloc skb for fw cpu exception\n"); 3474 return -ENOMEM; 3475 } 3476 3477 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 3478 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 3479 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 3480 3481 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3482 H2C_CAT_TEST, 3483 H2C_CL_FW_STATUS_TEST, 3484 H2C_FUNC_CPU_EXCEPTION, 0, 0, 3485 H2C_FW_CPU_EXCEPTION_LEN); 3486 3487 ret = rtw89_h2c_tx(rtwdev, skb, false); 3488 if (ret) { 3489 rtw89_err(rtwdev, "failed to send h2c\n"); 3490 goto fail; 3491 } 3492 3493 return 0; 3494 3495 fail: 3496 dev_kfree_skb_any(skb); 3497 return ret; 3498 } 3499 3500 #define H2C_PKT_DROP_LEN 24 3501 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 3502 const struct rtw89_pkt_drop_params *params) 3503 { 3504 struct sk_buff *skb; 3505 int ret; 3506 3507 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 3508 if (!skb) { 3509 rtw89_err(rtwdev, 3510 "failed to alloc skb for packet drop\n"); 3511 return -ENOMEM; 3512 } 3513 3514 switch (params->sel) { 3515 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 3516 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 3517 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 3518 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 3519 case RTW89_PKT_DROP_SEL_BAND_ONCE: 3520 break; 3521 default: 3522 rtw89_debug(rtwdev, RTW89_DBG_FW, 3523 "H2C of pkt drop might not fully support sel: %d yet\n", 3524 params->sel); 3525 break; 3526 } 3527 3528 skb_put(skb, H2C_PKT_DROP_LEN); 3529 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 3530 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 3531 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 3532 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 3533 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 3534 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 3535 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 3536 params->macid_band_sel[0]); 3537 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 3538 params->macid_band_sel[1]); 3539 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 3540 params->macid_band_sel[2]); 3541 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 3542 params->macid_band_sel[3]); 3543 3544 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3545 H2C_CAT_MAC, 3546 H2C_CL_MAC_FW_OFLD, 3547 H2C_FUNC_PKT_DROP, 0, 0, 3548 H2C_PKT_DROP_LEN); 3549 3550 ret = rtw89_h2c_tx(rtwdev, skb, false); 3551 if (ret) { 3552 rtw89_err(rtwdev, "failed to send h2c\n"); 3553 goto fail; 3554 } 3555 3556 return 0; 3557 3558 fail: 3559 dev_kfree_skb_any(skb); 3560 return ret; 3561 } 3562 3563 #define H2C_KEEP_ALIVE_LEN 4 3564 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3565 bool enable) 3566 { 3567 struct sk_buff *skb; 3568 u8 pkt_id = 0; 3569 int ret; 3570 3571 if (enable) { 3572 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 3573 RTW89_PKT_OFLD_TYPE_NULL_DATA, 3574 &pkt_id); 3575 if (ret) 3576 return -EPERM; 3577 } 3578 3579 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 3580 if (!skb) { 3581 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3582 return -ENOMEM; 3583 } 3584 3585 skb_put(skb, H2C_KEEP_ALIVE_LEN); 3586 3587 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 3588 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 3589 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 3590 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 3591 3592 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3593 H2C_CAT_MAC, 3594 H2C_CL_MAC_WOW, 3595 H2C_FUNC_KEEP_ALIVE, 0, 1, 3596 H2C_KEEP_ALIVE_LEN); 3597 3598 ret = rtw89_h2c_tx(rtwdev, skb, false); 3599 if (ret) { 3600 rtw89_err(rtwdev, "failed to send h2c\n"); 3601 goto fail; 3602 } 3603 3604 return 0; 3605 3606 fail: 3607 dev_kfree_skb_any(skb); 3608 3609 return ret; 3610 } 3611 3612 #define H2C_DISCONNECT_DETECT_LEN 8 3613 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 3614 struct rtw89_vif *rtwvif, bool enable) 3615 { 3616 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3617 struct sk_buff *skb; 3618 u8 macid = rtwvif->mac_id; 3619 int ret; 3620 3621 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 3622 if (!skb) { 3623 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3624 return -ENOMEM; 3625 } 3626 3627 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 3628 3629 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 3630 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 3631 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 3632 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 3633 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 3634 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 3635 } 3636 3637 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3638 H2C_CAT_MAC, 3639 H2C_CL_MAC_WOW, 3640 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 3641 H2C_DISCONNECT_DETECT_LEN); 3642 3643 ret = rtw89_h2c_tx(rtwdev, skb, false); 3644 if (ret) { 3645 rtw89_err(rtwdev, "failed to send h2c\n"); 3646 goto fail; 3647 } 3648 3649 return 0; 3650 3651 fail: 3652 dev_kfree_skb_any(skb); 3653 3654 return ret; 3655 } 3656 3657 #define H2C_WOW_GLOBAL_LEN 8 3658 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3659 bool enable) 3660 { 3661 struct sk_buff *skb; 3662 u8 macid = rtwvif->mac_id; 3663 int ret; 3664 3665 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); 3666 if (!skb) { 3667 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3668 return -ENOMEM; 3669 } 3670 3671 skb_put(skb, H2C_WOW_GLOBAL_LEN); 3672 3673 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); 3674 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); 3675 3676 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3677 H2C_CAT_MAC, 3678 H2C_CL_MAC_WOW, 3679 H2C_FUNC_WOW_GLOBAL, 0, 1, 3680 H2C_WOW_GLOBAL_LEN); 3681 3682 ret = rtw89_h2c_tx(rtwdev, skb, false); 3683 if (ret) { 3684 rtw89_err(rtwdev, "failed to send h2c\n"); 3685 goto fail; 3686 } 3687 3688 return 0; 3689 3690 fail: 3691 dev_kfree_skb_any(skb); 3692 3693 return ret; 3694 } 3695 3696 #define H2C_WAKEUP_CTRL_LEN 4 3697 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 3698 struct rtw89_vif *rtwvif, 3699 bool enable) 3700 { 3701 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3702 struct sk_buff *skb; 3703 u8 macid = rtwvif->mac_id; 3704 int ret; 3705 3706 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 3707 if (!skb) { 3708 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3709 return -ENOMEM; 3710 } 3711 3712 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 3713 3714 if (rtw_wow->pattern_cnt) 3715 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 3716 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 3717 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 3718 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 3719 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 3720 3721 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 3722 3723 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3724 H2C_CAT_MAC, 3725 H2C_CL_MAC_WOW, 3726 H2C_FUNC_WAKEUP_CTRL, 0, 1, 3727 H2C_WAKEUP_CTRL_LEN); 3728 3729 ret = rtw89_h2c_tx(rtwdev, skb, false); 3730 if (ret) { 3731 rtw89_err(rtwdev, "failed to send h2c\n"); 3732 goto fail; 3733 } 3734 3735 return 0; 3736 3737 fail: 3738 dev_kfree_skb_any(skb); 3739 3740 return ret; 3741 } 3742 3743 #define H2C_WOW_CAM_UPD_LEN 24 3744 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 3745 struct rtw89_wow_cam_info *cam_info) 3746 { 3747 struct sk_buff *skb; 3748 int ret; 3749 3750 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 3751 if (!skb) { 3752 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3753 return -ENOMEM; 3754 } 3755 3756 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 3757 3758 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 3759 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 3760 if (cam_info->valid) { 3761 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 3762 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 3763 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 3764 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 3765 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 3766 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 3767 cam_info->negative_pattern_match); 3768 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 3769 cam_info->skip_mac_hdr); 3770 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 3771 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 3772 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 3773 } 3774 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 3775 3776 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3777 H2C_CAT_MAC, 3778 H2C_CL_MAC_WOW, 3779 H2C_FUNC_WOW_CAM_UPD, 0, 1, 3780 H2C_WOW_CAM_UPD_LEN); 3781 3782 ret = rtw89_h2c_tx(rtwdev, skb, false); 3783 if (ret) { 3784 rtw89_err(rtwdev, "failed to send h2c\n"); 3785 goto fail; 3786 } 3787 3788 return 0; 3789 fail: 3790 dev_kfree_skb_any(skb); 3791 3792 return ret; 3793 } 3794 3795 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 3796 struct rtw89_wait_info *wait, unsigned int cond) 3797 { 3798 int ret; 3799 3800 ret = rtw89_h2c_tx(rtwdev, skb, false); 3801 if (ret) { 3802 rtw89_err(rtwdev, "failed to send h2c\n"); 3803 dev_kfree_skb_any(skb); 3804 return -EBUSY; 3805 } 3806 3807 return rtw89_wait_for_cond(wait, cond); 3808 } 3809 3810 #define H2C_ADD_MCC_LEN 16 3811 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 3812 const struct rtw89_fw_mcc_add_req *p) 3813 { 3814 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3815 struct sk_buff *skb; 3816 unsigned int cond; 3817 3818 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 3819 if (!skb) { 3820 rtw89_err(rtwdev, 3821 "failed to alloc skb for add mcc\n"); 3822 return -ENOMEM; 3823 } 3824 3825 skb_put(skb, H2C_ADD_MCC_LEN); 3826 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 3827 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 3828 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 3829 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 3830 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 3831 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 3832 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 3833 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 3834 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 3835 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 3836 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 3837 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 3838 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 3839 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 3840 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 3841 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 3842 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 3843 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 3844 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 3845 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 3846 3847 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3848 H2C_CAT_MAC, 3849 H2C_CL_MCC, 3850 H2C_FUNC_ADD_MCC, 0, 0, 3851 H2C_ADD_MCC_LEN); 3852 3853 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 3854 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3855 } 3856 3857 #define H2C_START_MCC_LEN 12 3858 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 3859 const struct rtw89_fw_mcc_start_req *p) 3860 { 3861 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3862 struct sk_buff *skb; 3863 unsigned int cond; 3864 3865 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 3866 if (!skb) { 3867 rtw89_err(rtwdev, 3868 "failed to alloc skb for start mcc\n"); 3869 return -ENOMEM; 3870 } 3871 3872 skb_put(skb, H2C_START_MCC_LEN); 3873 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 3874 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 3875 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 3876 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 3877 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 3878 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 3879 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 3880 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 3881 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 3882 3883 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3884 H2C_CAT_MAC, 3885 H2C_CL_MCC, 3886 H2C_FUNC_START_MCC, 0, 0, 3887 H2C_START_MCC_LEN); 3888 3889 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 3890 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3891 } 3892 3893 #define H2C_STOP_MCC_LEN 4 3894 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 3895 bool prev_groups) 3896 { 3897 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3898 struct sk_buff *skb; 3899 unsigned int cond; 3900 3901 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 3902 if (!skb) { 3903 rtw89_err(rtwdev, 3904 "failed to alloc skb for stop mcc\n"); 3905 return -ENOMEM; 3906 } 3907 3908 skb_put(skb, H2C_STOP_MCC_LEN); 3909 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 3910 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 3911 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 3912 3913 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3914 H2C_CAT_MAC, 3915 H2C_CL_MCC, 3916 H2C_FUNC_STOP_MCC, 0, 0, 3917 H2C_STOP_MCC_LEN); 3918 3919 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 3920 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3921 } 3922 3923 #define H2C_DEL_MCC_GROUP_LEN 4 3924 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 3925 bool prev_groups) 3926 { 3927 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3928 struct sk_buff *skb; 3929 unsigned int cond; 3930 3931 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 3932 if (!skb) { 3933 rtw89_err(rtwdev, 3934 "failed to alloc skb for del mcc group\n"); 3935 return -ENOMEM; 3936 } 3937 3938 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 3939 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 3940 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 3941 3942 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3943 H2C_CAT_MAC, 3944 H2C_CL_MCC, 3945 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 3946 H2C_DEL_MCC_GROUP_LEN); 3947 3948 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 3949 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3950 } 3951 3952 #define H2C_RESET_MCC_GROUP_LEN 4 3953 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 3954 { 3955 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3956 struct sk_buff *skb; 3957 unsigned int cond; 3958 3959 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 3960 if (!skb) { 3961 rtw89_err(rtwdev, 3962 "failed to alloc skb for reset mcc group\n"); 3963 return -ENOMEM; 3964 } 3965 3966 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 3967 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 3968 3969 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3970 H2C_CAT_MAC, 3971 H2C_CL_MCC, 3972 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 3973 H2C_RESET_MCC_GROUP_LEN); 3974 3975 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 3976 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3977 } 3978 3979 #define H2C_MCC_REQ_TSF_LEN 4 3980 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 3981 const struct rtw89_fw_mcc_tsf_req *req, 3982 struct rtw89_mac_mcc_tsf_rpt *rpt) 3983 { 3984 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3985 struct rtw89_mac_mcc_tsf_rpt *tmp; 3986 struct sk_buff *skb; 3987 unsigned int cond; 3988 int ret; 3989 3990 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 3991 if (!skb) { 3992 rtw89_err(rtwdev, 3993 "failed to alloc skb for mcc req tsf\n"); 3994 return -ENOMEM; 3995 } 3996 3997 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 3998 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 3999 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 4000 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 4001 4002 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4003 H2C_CAT_MAC, 4004 H2C_CL_MCC, 4005 H2C_FUNC_MCC_REQ_TSF, 0, 0, 4006 H2C_MCC_REQ_TSF_LEN); 4007 4008 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 4009 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4010 if (ret) 4011 return ret; 4012 4013 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 4014 *rpt = *tmp; 4015 4016 return 0; 4017 } 4018 4019 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 4020 int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid, 4021 u8 *bitmap) 4022 { 4023 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4024 struct sk_buff *skb; 4025 unsigned int cond; 4026 u8 map_len; 4027 u8 h2c_len; 4028 4029 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 4030 map_len = RTW89_MAX_MAC_ID_NUM / 8; 4031 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 4032 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 4033 if (!skb) { 4034 rtw89_err(rtwdev, 4035 "failed to alloc skb for mcc macid bitmap\n"); 4036 return -ENOMEM; 4037 } 4038 4039 skb_put(skb, h2c_len); 4040 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 4041 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 4042 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 4043 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 4044 4045 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4046 H2C_CAT_MAC, 4047 H2C_CL_MCC, 4048 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 4049 h2c_len); 4050 4051 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 4052 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4053 } 4054 4055 #define H2C_MCC_SYNC_LEN 4 4056 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 4057 u8 target, u8 offset) 4058 { 4059 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4060 struct sk_buff *skb; 4061 unsigned int cond; 4062 4063 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 4064 if (!skb) { 4065 rtw89_err(rtwdev, 4066 "failed to alloc skb for mcc sync\n"); 4067 return -ENOMEM; 4068 } 4069 4070 skb_put(skb, H2C_MCC_SYNC_LEN); 4071 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 4072 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 4073 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 4074 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 4075 4076 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4077 H2C_CAT_MAC, 4078 H2C_CL_MCC, 4079 H2C_FUNC_MCC_SYNC, 0, 0, 4080 H2C_MCC_SYNC_LEN); 4081 4082 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 4083 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4084 } 4085 4086 #define H2C_MCC_SET_DURATION_LEN 20 4087 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 4088 const struct rtw89_fw_mcc_duration *p) 4089 { 4090 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4091 struct sk_buff *skb; 4092 unsigned int cond; 4093 4094 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 4095 if (!skb) { 4096 rtw89_err(rtwdev, 4097 "failed to alloc skb for mcc set duration\n"); 4098 return -ENOMEM; 4099 } 4100 4101 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 4102 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 4103 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 4104 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 4105 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 4106 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 4107 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 4108 p->start_tsf_low); 4109 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 4110 p->start_tsf_high); 4111 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 4112 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 4113 4114 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4115 H2C_CAT_MAC, 4116 H2C_CL_MCC, 4117 H2C_FUNC_MCC_SET_DURATION, 0, 0, 4118 H2C_MCC_SET_DURATION_LEN); 4119 4120 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 4121 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4122 } 4123