// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
			       struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info;
	const u8 *fw_end = fw + len;
	const u8 *fwdynhdr;
	const u8 *bin;
	u32 base_hdr_len;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = GET_FW_HDR_SEC_NUM(fw);
	base_hdr_len = RTW89_FW_HDR_SIZE +
		       info->section_num * RTW89_FW_SECTION_HDR_SIZE;
	info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw);

	if (info->dynamic_hdr_en) {
		info->hdr_len = GET_FW_HDR_LEN(fw);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = fw + base_hdr_len;
		if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	fw += RTW89_FW_HDR_SIZE;
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
		if (GET_FWSECTION_HDR_CHECKSUM(fw))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = GET_FWSECTION_HDR_REDL(fw);
		section_info->dladdr =
			GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		fw += RTW89_FW_SECTION_HDR_SIZE;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u8 *mfw = fw_info->firmware->data;
	u32 mfw_len = fw_info->firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware support normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->cv != rtwdev->hal.cv ||
		    mfw_info->type != type ||
		    mfw_info->mp)
			continue;

		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
		fw_suit->size = le32_to_cpu(mfw_info->size);
		return 0;
	}

	rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;
}

static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
				enum rtw89_fw_type type,
				struct rtw89_fw_suit *fw_suit)
{
	const u8 *hdr = fw_suit->data;

	fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
	fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
	fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
	fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
	fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
	fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
	fw_suit->build_date = GET_FW_HDR_DATE(hdr);
	fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
	fw_suit->build_min = GET_FW_HDR_MIN(hdr);
	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
	if (ret)
		return ret;

	rtw89_fw_update_ver(rtwdev, type, fw_suit);

	return 0;
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 250 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 251 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 252 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 253 }; 254 255 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 256 { 257 const struct rtw89_chip_info *chip = rtwdev->chip; 258 const struct __fw_feat_cfg *ent; 259 const struct rtw89_fw_suit *fw_suit; 260 u32 suit_ver_code; 261 int i; 262 263 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 264 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 265 266 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 267 ent = &fw_feat_tbl[i]; 268 if (chip->chip_id != ent->chip_id) 269 continue; 270 271 if (ent->cond(suit_ver_code, ent->ver_code)) 272 RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw); 273 } 274 } 275 276 void rtw89_early_fw_feature_recognize(struct device *device, 277 const struct rtw89_chip_info *chip, 278 u32 *early_feat_map) 279 { 280 union { 281 struct rtw89_mfw_hdr mfw_hdr; 282 u8 fw_hdr[RTW89_FW_HDR_SIZE]; 283 } buf = {}; 284 const struct firmware *firmware; 285 u32 ver_code; 286 int ret; 287 int i; 288 289 ret = request_partial_firmware_into_buf(&firmware, chip->fw_name, 290 device, &buf, sizeof(buf), 0); 291 if (ret) { 292 dev_err(device, "failed to early request firmware: %d\n", ret); 293 return; 294 } 295 296 ver_code = buf.mfw_hdr.sig != RTW89_MFW_SIG ? 297 RTW89_FW_HDR_VER_CODE(&buf.fw_hdr) : 298 RTW89_MFW_HDR_VER_CODE(&buf.mfw_hdr); 299 if (!ver_code) 300 goto out; 301 302 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 303 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 304 305 if (chip->chip_id != ent->chip_id) 306 continue; 307 308 if (ent->cond(ver_code, ent->ver_code)) 309 *early_feat_map |= BIT(ent->feature); 310 } 311 312 out: 313 release_firmware(firmware); 314 } 315 316 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 317 { 318 int ret; 319 320 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL); 321 if (ret) 322 return ret; 323 324 /* It still works if wowlan firmware isn't existing. */ 325 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN); 326 327 rtw89_fw_recognize_features(rtwdev); 328 329 return 0; 330 } 331 332 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 333 u8 type, u8 cat, u8 class, u8 func, 334 bool rack, bool dack, u32 len) 335 { 336 struct fwcmd_hdr *hdr; 337 338 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 339 340 if (!(rtwdev->fw.h2c_seq % 4)) 341 rack = true; 342 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 343 FIELD_PREP(H2C_HDR_CAT, cat) | 344 FIELD_PREP(H2C_HDR_CLASS, class) | 345 FIELD_PREP(H2C_HDR_FUNC, func) | 346 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 347 348 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 349 len + H2C_HEADER_LEN) | 350 (rack ? H2C_HDR_REC_ACK : 0) | 351 (dack ? 
H2C_HDR_DONE_ACK : 0)); 352 353 rtwdev->fw.h2c_seq++; 354 } 355 356 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 357 struct sk_buff *skb, 358 u8 type, u8 cat, u8 class, u8 func, 359 u32 len) 360 { 361 struct fwcmd_hdr *hdr; 362 363 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 364 365 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 366 FIELD_PREP(H2C_HDR_CAT, cat) | 367 FIELD_PREP(H2C_HDR_CLASS, class) | 368 FIELD_PREP(H2C_HDR_FUNC, func) | 369 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 370 371 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 372 len + H2C_HEADER_LEN)); 373 } 374 375 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 376 { 377 struct sk_buff *skb; 378 u32 ret = 0; 379 380 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 381 if (!skb) { 382 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 383 return -ENOMEM; 384 } 385 386 skb_put_data(skb, fw, len); 387 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); 388 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 389 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 390 H2C_FUNC_MAC_FWHDR_DL, len); 391 392 ret = rtw89_h2c_tx(rtwdev, skb, false); 393 if (ret) { 394 rtw89_err(rtwdev, "failed to send h2c\n"); 395 ret = -1; 396 goto fail; 397 } 398 399 return 0; 400 fail: 401 dev_kfree_skb_any(skb); 402 403 return ret; 404 } 405 406 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 407 { 408 u8 val; 409 int ret; 410 411 ret = __rtw89_fw_download_hdr(rtwdev, fw, len); 412 if (ret) { 413 rtw89_err(rtwdev, "[ERR]FW header download\n"); 414 return ret; 415 } 416 417 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, 418 1, FWDL_WAIT_CNT, false, 419 rtwdev, R_AX_WCPU_FW_CTRL); 420 if (ret) { 421 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 422 return ret; 423 } 424 425 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 426 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 427 428 return 0; 429 } 430 431 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 432 struct rtw89_fw_hdr_section_info *info) 433 { 434 struct sk_buff *skb; 435 const u8 *section = info->addr; 436 u32 residue_len = info->len; 437 u32 pkt_len; 438 int ret; 439 440 while (residue_len) { 441 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 442 pkt_len = FWDL_SECTION_PER_PKT_LEN; 443 else 444 pkt_len = residue_len; 445 446 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 447 if (!skb) { 448 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 449 return -ENOMEM; 450 } 451 skb_put_data(skb, section, pkt_len); 452 453 ret = rtw89_h2c_tx(rtwdev, skb, true); 454 if (ret) { 455 rtw89_err(rtwdev, "failed to send h2c\n"); 456 ret = -1; 457 goto fail; 458 } 459 460 section += pkt_len; 461 residue_len -= pkt_len; 462 } 463 464 return 0; 465 fail: 466 dev_kfree_skb_any(skb); 467 468 return ret; 469 } 470 471 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, 472 struct rtw89_fw_bin_info *info) 473 { 474 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 475 u8 section_num = info->section_num; 476 int ret; 477 478 while (section_num--) { 479 ret = __rtw89_fw_download_main(rtwdev, section_info); 480 if (ret) 481 return ret; 482 section_info++; 483 } 484 485 mdelay(5); 486 487 ret = rtw89_fw_check_rdy(rtwdev); 488 if (ret) { 489 rtw89_warn(rtwdev, "download firmware fail\n"); 490 return ret; 491 } 492 493 return 0; 494 } 495 496 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 497 { 498 u32 
val32; 499 u16 index; 500 501 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 502 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 503 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 504 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 505 506 for (index = 0; index < 15; index++) { 507 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL); 508 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 509 fsleep(10); 510 } 511 } 512 513 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 514 { 515 u32 val32; 516 u16 val16; 517 518 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 519 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 520 521 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); 522 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); 523 524 rtw89_fw_prog_cnt_dump(rtwdev); 525 } 526 527 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 528 { 529 struct rtw89_fw_info *fw_info = &rtwdev->fw; 530 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 531 struct rtw89_fw_bin_info info; 532 const u8 *fw = fw_suit->data; 533 u32 len = fw_suit->size; 534 u8 val; 535 int ret; 536 537 rtw89_mac_disable_cpu(rtwdev); 538 ret = rtw89_mac_enable_cpu(rtwdev, 0, true); 539 if (ret) 540 return ret; 541 542 if (!fw || !len) { 543 rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); 544 return -ENOENT; 545 } 546 547 ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); 548 if (ret) { 549 rtw89_err(rtwdev, "parse fw header fail\n"); 550 goto fwdl_err; 551 } 552 553 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, 554 1, FWDL_WAIT_CNT, false, 555 rtwdev, R_AX_WCPU_FW_CTRL); 556 if (ret) { 557 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 558 goto fwdl_err; 559 } 560 561 ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len); 562 if (ret) { 563 ret = -EBUSY; 564 goto fwdl_err; 565 } 566 567 ret = rtw89_fw_download_main(rtwdev, fw, &info); 568 if (ret) { 569 ret = -EBUSY; 570 goto fwdl_err; 571 } 572 573 fw_info->h2c_seq = 0; 574 fw_info->rec_seq = 0; 575 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 576 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 577 578 return ret; 579 580 fwdl_err: 581 rtw89_fw_dl_fail_dump(rtwdev); 582 return ret; 583 } 584 585 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 586 { 587 struct rtw89_fw_info *fw = &rtwdev->fw; 588 589 wait_for_completion(&fw->completion); 590 if (!fw->firmware) 591 return -EINVAL; 592 593 return 0; 594 } 595 596 static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context) 597 { 598 struct rtw89_fw_info *fw = context; 599 struct rtw89_dev *rtwdev = fw->rtwdev; 600 601 if (!firmware || !firmware->data) { 602 rtw89_err(rtwdev, "failed to request firmware\n"); 603 complete_all(&fw->completion); 604 return; 605 } 606 607 fw->firmware = firmware; 608 complete_all(&fw->completion); 609 } 610 611 int rtw89_load_firmware(struct rtw89_dev *rtwdev) 612 { 613 struct rtw89_fw_info *fw = &rtwdev->fw; 614 const char *fw_name = rtwdev->chip->fw_name; 615 int ret; 616 617 fw->rtwdev = rtwdev; 618 init_completion(&fw->completion); 619 620 ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, 621 GFP_KERNEL, fw, rtw89_load_firmware_cb); 622 if (ret) { 623 rtw89_err(rtwdev, "failed to async firmware request\n"); 624 return ret; 625 } 626 627 return 0; 628 } 629 630 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 631 { 632 struct rtw89_fw_info *fw = &rtwdev->fw; 633 634 rtw89_wait_firmware_completion(rtwdev); 635 636 
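	/* The firmware was requested asynchronously in rtw89_load_firmware()
	 * via request_firmware_nowait(); waiting for the completion above
	 * guarantees the callback has finished (and fw->firmware is stable)
	 * before it is released below.
	 */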
if (fw->firmware) 637 release_firmware(fw->firmware); 638 } 639 640 #define H2C_CAM_LEN 60 641 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 642 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 643 { 644 struct sk_buff *skb; 645 int ret; 646 647 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 648 if (!skb) { 649 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 650 return -ENOMEM; 651 } 652 skb_put(skb, H2C_CAM_LEN); 653 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 654 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data); 655 656 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 657 H2C_CAT_MAC, 658 H2C_CL_MAC_ADDR_CAM_UPDATE, 659 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 660 H2C_CAM_LEN); 661 662 ret = rtw89_h2c_tx(rtwdev, skb, false); 663 if (ret) { 664 rtw89_err(rtwdev, "failed to send h2c\n"); 665 goto fail; 666 } 667 668 return 0; 669 fail: 670 dev_kfree_skb_any(skb); 671 672 return ret; 673 } 674 675 #define H2C_DCTL_SEC_CAM_LEN 68 676 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 677 struct rtw89_vif *rtwvif, 678 struct rtw89_sta *rtwsta) 679 { 680 struct sk_buff *skb; 681 int ret; 682 683 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); 684 if (!skb) { 685 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 686 return -ENOMEM; 687 } 688 skb_put(skb, H2C_DCTL_SEC_CAM_LEN); 689 690 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); 691 692 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 693 H2C_CAT_MAC, 694 H2C_CL_MAC_FR_EXCHG, 695 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 696 H2C_DCTL_SEC_CAM_LEN); 697 698 ret = rtw89_h2c_tx(rtwdev, skb, false); 699 if (ret) { 700 rtw89_err(rtwdev, "failed to send h2c\n"); 701 goto fail; 702 } 703 704 return 0; 705 fail: 706 dev_kfree_skb_any(skb); 707 708 return ret; 709 } 710 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 711 712 #define H2C_BA_CAM_LEN 8 713 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 714 bool valid, struct ieee80211_ampdu_params *params) 715 { 716 const struct rtw89_chip_info *chip = rtwdev->chip; 717 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 718 u8 macid = rtwsta->mac_id; 719 struct sk_buff *skb; 720 u8 entry_idx; 721 int ret; 722 723 ret = valid ? 724 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 725 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 726 if (ret) { 727 /* it still works even if we don't have static BA CAM, because 728 * hardware can create dynamic BA CAM automatically. 729 */ 730 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 731 "failed to %s entry tid=%d for h2c ba cam\n", 732 valid ? 
"alloc" : "free", params->tid); 733 return 0; 734 } 735 736 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 737 if (!skb) { 738 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 739 return -ENOMEM; 740 } 741 skb_put(skb, H2C_BA_CAM_LEN); 742 SET_BA_CAM_MACID(skb->data, macid); 743 if (chip->bacam_v1) 744 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 745 else 746 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx); 747 if (!valid) 748 goto end; 749 SET_BA_CAM_VALID(skb->data, valid); 750 SET_BA_CAM_TID(skb->data, params->tid); 751 if (params->buf_size > 64) 752 SET_BA_CAM_BMAP_SIZE(skb->data, 4); 753 else 754 SET_BA_CAM_BMAP_SIZE(skb->data, 0); 755 /* If init req is set, hw will set the ssn */ 756 SET_BA_CAM_INIT_REQ(skb->data, 1); 757 SET_BA_CAM_SSN(skb->data, params->ssn); 758 759 if (chip->bacam_v1) { 760 SET_BA_CAM_STD_EN(skb->data, 1); 761 SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx); 762 } 763 764 end: 765 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 766 H2C_CAT_MAC, 767 H2C_CL_BA_CAM, 768 H2C_FUNC_MAC_BA_CAM, 0, 1, 769 H2C_BA_CAM_LEN); 770 771 ret = rtw89_h2c_tx(rtwdev, skb, false); 772 if (ret) { 773 rtw89_err(rtwdev, "failed to send h2c\n"); 774 goto fail; 775 } 776 777 return 0; 778 fail: 779 dev_kfree_skb_any(skb); 780 781 return ret; 782 } 783 784 static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev, 785 u8 entry_idx, u8 uid) 786 { 787 struct sk_buff *skb; 788 int ret; 789 790 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 791 if (!skb) { 792 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 793 return -ENOMEM; 794 } 795 skb_put(skb, H2C_BA_CAM_LEN); 796 797 SET_BA_CAM_VALID(skb->data, 1); 798 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 799 SET_BA_CAM_UID(skb->data, uid); 800 SET_BA_CAM_BAND(skb->data, 0); 801 SET_BA_CAM_STD_EN(skb->data, 0); 802 803 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 804 H2C_CAT_MAC, 805 H2C_CL_BA_CAM, 806 H2C_FUNC_MAC_BA_CAM, 0, 1, 807 H2C_BA_CAM_LEN); 808 809 ret = rtw89_h2c_tx(rtwdev, skb, false); 810 if (ret) { 811 rtw89_err(rtwdev, "failed to send h2c\n"); 812 goto fail; 813 } 814 815 return 0; 816 fail: 817 dev_kfree_skb_any(skb); 818 819 return ret; 820 } 821 822 void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev) 823 { 824 const struct rtw89_chip_info *chip = rtwdev->chip; 825 u8 entry_idx = chip->bacam_num; 826 u8 uid = 0; 827 int i; 828 829 for (i = 0; i < chip->bacam_dynamic_num; i++) { 830 rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid); 831 entry_idx++; 832 uid++; 833 } 834 } 835 836 #define H2C_LOG_CFG_LEN 12 837 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 838 { 839 struct sk_buff *skb; 840 u32 comp = enable ? 
BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 841 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; 842 int ret; 843 844 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 845 if (!skb) { 846 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 847 return -ENOMEM; 848 } 849 850 skb_put(skb, H2C_LOG_CFG_LEN); 851 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); 852 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 853 SET_LOG_CFG_COMP(skb->data, comp); 854 SET_LOG_CFG_COMP_EXT(skb->data, 0); 855 856 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 857 H2C_CAT_MAC, 858 H2C_CL_FW_INFO, 859 H2C_FUNC_LOG_CFG, 0, 0, 860 H2C_LOG_CFG_LEN); 861 862 ret = rtw89_h2c_tx(rtwdev, skb, false); 863 if (ret) { 864 rtw89_err(rtwdev, "failed to send h2c\n"); 865 goto fail; 866 } 867 868 return 0; 869 fail: 870 dev_kfree_skb_any(skb); 871 872 return ret; 873 } 874 875 static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev, 876 struct rtw89_vif *rtwvif, 877 enum rtw89_fw_pkt_ofld_type type, 878 u8 *id) 879 { 880 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 881 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 882 struct rtw89_pktofld_info *info; 883 struct sk_buff *skb; 884 int ret; 885 886 info = kzalloc(sizeof(*info), GFP_KERNEL); 887 if (!info) 888 return -ENOMEM; 889 890 switch (type) { 891 case RTW89_PKT_OFLD_TYPE_PS_POLL: 892 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 893 break; 894 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 895 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 896 break; 897 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 898 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 899 break; 900 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 901 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 902 break; 903 default: 904 goto err; 905 } 906 907 if (!skb) 908 goto err; 909 910 list_add_tail(&info->list, &rtw_wow->pkt_list); 911 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 912 kfree_skb(skb); 913 914 if (ret) 915 return ret; 916 917 *id = info->id; 918 return 0; 919 920 err: 921 kfree(info); 922 return -ENOMEM; 923 } 924 925 #define H2C_GENERAL_PKT_LEN 6 926 #define H2C_GENERAL_PKT_ID_UND 0xff 927 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid) 928 { 929 struct sk_buff *skb; 930 int ret; 931 932 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 933 if (!skb) { 934 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 935 return -ENOMEM; 936 } 937 skb_put(skb, H2C_GENERAL_PKT_LEN); 938 SET_GENERAL_PKT_MACID(skb->data, macid); 939 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 940 SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 941 SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 942 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 943 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 944 945 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 946 H2C_CAT_MAC, 947 H2C_CL_FW_INFO, 948 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 949 H2C_GENERAL_PKT_LEN); 950 951 ret = rtw89_h2c_tx(rtwdev, skb, false); 952 if (ret) { 953 rtw89_err(rtwdev, "failed to send h2c\n"); 954 goto fail; 955 } 956 957 return 0; 958 fail: 959 dev_kfree_skb_any(skb); 960 961 return ret; 962 } 963 964 #define H2C_LPS_PARM_LEN 8 965 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 966 struct rtw89_lps_parm *lps_param) 967 { 968 struct sk_buff *skb; 969 int ret; 970 971 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 972 if (!skb) { 973 
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 974 return -ENOMEM; 975 } 976 skb_put(skb, H2C_LPS_PARM_LEN); 977 978 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 979 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 980 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 981 SET_LPS_PARM_RLBM(skb->data, 1); 982 SET_LPS_PARM_SMARTPS(skb->data, 1); 983 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 984 SET_LPS_PARM_VOUAPSD(skb->data, 0); 985 SET_LPS_PARM_VIUAPSD(skb->data, 0); 986 SET_LPS_PARM_BEUAPSD(skb->data, 0); 987 SET_LPS_PARM_BKUAPSD(skb->data, 0); 988 989 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 990 H2C_CAT_MAC, 991 H2C_CL_MAC_PS, 992 H2C_FUNC_MAC_LPS_PARM, 0, 1, 993 H2C_LPS_PARM_LEN); 994 995 ret = rtw89_h2c_tx(rtwdev, skb, false); 996 if (ret) { 997 rtw89_err(rtwdev, "failed to send h2c\n"); 998 goto fail; 999 } 1000 1001 return 0; 1002 fail: 1003 dev_kfree_skb_any(skb); 1004 1005 return ret; 1006 } 1007 1008 #define H2C_P2P_ACT_LEN 20 1009 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 1010 struct ieee80211_p2p_noa_desc *desc, 1011 u8 act, u8 noa_id) 1012 { 1013 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1014 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 1015 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 1016 struct sk_buff *skb; 1017 u8 *cmd; 1018 int ret; 1019 1020 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 1021 if (!skb) { 1022 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1023 return -ENOMEM; 1024 } 1025 skb_put(skb, H2C_P2P_ACT_LEN); 1026 cmd = skb->data; 1027 1028 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 1029 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 1030 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 1031 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 1032 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 1033 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 1034 if (desc) { 1035 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 1036 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 1037 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 1038 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 1039 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 1040 } 1041 1042 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1043 H2C_CAT_MAC, H2C_CL_MAC_PS, 1044 H2C_FUNC_P2P_ACT, 0, 0, 1045 H2C_P2P_ACT_LEN); 1046 1047 ret = rtw89_h2c_tx(rtwdev, skb, false); 1048 if (ret) { 1049 rtw89_err(rtwdev, "failed to send h2c\n"); 1050 goto fail; 1051 } 1052 1053 return 0; 1054 fail: 1055 dev_kfree_skb_any(skb); 1056 1057 return ret; 1058 } 1059 1060 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 1061 struct sk_buff *skb) 1062 { 1063 struct rtw89_hal *hal = &rtwdev->hal; 1064 u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 1065 u8 map_b = hal->antenna_tx == RF_AB ? 
1 : 0; 1066 1067 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 1068 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 1069 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 1070 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 1071 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 1072 } 1073 1074 #define H2C_CMC_TBL_LEN 68 1075 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 1076 struct rtw89_vif *rtwvif) 1077 { 1078 const struct rtw89_chip_info *chip = rtwdev->chip; 1079 struct sk_buff *skb; 1080 u8 macid = rtwvif->mac_id; 1081 int ret; 1082 1083 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1084 if (!skb) { 1085 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1086 return -ENOMEM; 1087 } 1088 skb_put(skb, H2C_CMC_TBL_LEN); 1089 SET_CTRL_INFO_MACID(skb->data, macid); 1090 SET_CTRL_INFO_OPERATION(skb->data, 1); 1091 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1092 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 1093 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1094 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 1095 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 1096 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 1097 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 1098 } 1099 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 1100 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 1101 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1102 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1103 1104 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1105 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1106 chip->h2c_cctl_func_id, 0, 1, 1107 H2C_CMC_TBL_LEN); 1108 1109 ret = rtw89_h2c_tx(rtwdev, skb, false); 1110 if (ret) { 1111 rtw89_err(rtwdev, "failed to send h2c\n"); 1112 goto fail; 1113 } 1114 1115 return 0; 1116 fail: 1117 dev_kfree_skb_any(skb); 1118 1119 return ret; 1120 } 1121 1122 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 1123 struct ieee80211_sta *sta, u8 *pads) 1124 { 1125 bool ppe_th; 1126 u8 ppe16, ppe8; 1127 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 1128 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 1129 u8 ru_bitmap; 1130 u8 n, idx, sh; 1131 u16 ppe; 1132 int i; 1133 1134 if (!sta->deflink.he_cap.has_he) 1135 return; 1136 1137 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 1138 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 1139 if (!ppe_th) { 1140 u8 pad; 1141 1142 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 1143 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 1144 1145 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 1146 pads[i] = pad; 1147 1148 return; 1149 } 1150 1151 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 1152 n = hweight8(ru_bitmap); 1153 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 1154 1155 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 1156 if (!(ru_bitmap & BIT(i))) { 1157 pads[i] = 1; 1158 continue; 1159 } 1160 1161 idx = n >> 3; 1162 sh = n & 7; 1163 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 1164 1165 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 1166 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1167 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1168 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1169 1170 if (ppe16 != 7 && ppe8 == 7) 1171 pads[i] = 2; 1172 else if (ppe8 != 7) 1173 pads[i] = 1; 1174 else 1175 pads[i] = 0; 1176 } 1177 } 1178 1179 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 1180 struct ieee80211_vif *vif, 1181 struct ieee80211_sta *sta) 1182 { 1183 const struct rtw89_chip_info *chip = rtwdev->chip; 1184 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1185 struct 
rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1186 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1187 struct sk_buff *skb; 1188 u8 pads[RTW89_PPE_BW_NUM]; 1189 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1190 u16 lowest_rate; 1191 int ret; 1192 1193 memset(pads, 0, sizeof(pads)); 1194 if (sta) 1195 __get_sta_he_pkt_padding(rtwdev, sta, pads); 1196 1197 if (vif->p2p) 1198 lowest_rate = RTW89_HW_RATE_OFDM6; 1199 else if (chan->band_type == RTW89_BAND_2G) 1200 lowest_rate = RTW89_HW_RATE_CCK1; 1201 else 1202 lowest_rate = RTW89_HW_RATE_OFDM6; 1203 1204 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1205 if (!skb) { 1206 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1207 return -ENOMEM; 1208 } 1209 skb_put(skb, H2C_CMC_TBL_LEN); 1210 SET_CTRL_INFO_MACID(skb->data, mac_id); 1211 SET_CTRL_INFO_OPERATION(skb->data, 1); 1212 SET_CMC_TBL_DISRTSFB(skb->data, 1); 1213 SET_CMC_TBL_DISDATAFB(skb->data, 1); 1214 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 1215 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 1216 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 1217 if (vif->type == NL80211_IFTYPE_STATION) 1218 SET_CMC_TBL_ULDL(skb->data, 1); 1219 else 1220 SET_CMC_TBL_ULDL(skb->data, 0); 1221 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 1222 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 1223 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1224 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1225 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1226 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1227 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1228 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1229 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1230 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1231 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1232 } 1233 if (sta) 1234 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 1235 sta->deflink.he_cap.has_he); 1236 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1237 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1238 1239 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1240 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1241 chip->h2c_cctl_func_id, 0, 1, 1242 H2C_CMC_TBL_LEN); 1243 1244 ret = rtw89_h2c_tx(rtwdev, skb, false); 1245 if (ret) { 1246 rtw89_err(rtwdev, "failed to send h2c\n"); 1247 goto fail; 1248 } 1249 1250 return 0; 1251 fail: 1252 dev_kfree_skb_any(skb); 1253 1254 return ret; 1255 } 1256 1257 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1258 struct rtw89_sta *rtwsta) 1259 { 1260 const struct rtw89_chip_info *chip = rtwdev->chip; 1261 struct sk_buff *skb; 1262 int ret; 1263 1264 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1265 if (!skb) { 1266 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1267 return -ENOMEM; 1268 } 1269 skb_put(skb, H2C_CMC_TBL_LEN); 1270 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1271 SET_CTRL_INFO_OPERATION(skb->data, 1); 1272 if (rtwsta->cctl_tx_time) { 1273 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1274 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1275 } 1276 if (rtwsta->cctl_tx_retry_limit) { 1277 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1278 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1279 } 1280 
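	/* Only the fields written above (AMPDU max time and/or the data TX
	 * retry limit) are flagged for update in this CMAC table H2C; fields
	 * that were not touched are expected to keep the values firmware
	 * already holds for this MACID.
	 */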
1281 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1282 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1283 chip->h2c_cctl_func_id, 0, 1, 1284 H2C_CMC_TBL_LEN); 1285 1286 ret = rtw89_h2c_tx(rtwdev, skb, false); 1287 if (ret) { 1288 rtw89_err(rtwdev, "failed to send h2c\n"); 1289 goto fail; 1290 } 1291 1292 return 0; 1293 fail: 1294 dev_kfree_skb_any(skb); 1295 1296 return ret; 1297 } 1298 1299 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1300 struct rtw89_sta *rtwsta) 1301 { 1302 const struct rtw89_chip_info *chip = rtwdev->chip; 1303 struct sk_buff *skb; 1304 int ret; 1305 1306 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 1307 return 0; 1308 1309 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1310 if (!skb) { 1311 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1312 return -ENOMEM; 1313 } 1314 skb_put(skb, H2C_CMC_TBL_LEN); 1315 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1316 SET_CTRL_INFO_OPERATION(skb->data, 1); 1317 1318 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1319 1320 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1321 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1322 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 1323 H2C_CMC_TBL_LEN); 1324 1325 ret = rtw89_h2c_tx(rtwdev, skb, false); 1326 if (ret) { 1327 rtw89_err(rtwdev, "failed to send h2c\n"); 1328 goto fail; 1329 } 1330 1331 return 0; 1332 fail: 1333 dev_kfree_skb_any(skb); 1334 1335 return ret; 1336 } 1337 1338 #define H2C_BCN_BASE_LEN 12 1339 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 1340 struct rtw89_vif *rtwvif) 1341 { 1342 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 1343 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1344 struct sk_buff *skb; 1345 struct sk_buff *skb_beacon; 1346 u16 tim_offset; 1347 int bcn_total_len; 1348 u16 beacon_rate; 1349 int ret; 1350 1351 if (vif->p2p) 1352 beacon_rate = RTW89_HW_RATE_OFDM6; 1353 else if (chan->band_type == RTW89_BAND_2G) 1354 beacon_rate = RTW89_HW_RATE_CCK1; 1355 else 1356 beacon_rate = RTW89_HW_RATE_OFDM6; 1357 1358 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 1359 NULL, 0); 1360 if (!skb_beacon) { 1361 rtw89_err(rtwdev, "failed to get beacon skb\n"); 1362 return -ENOMEM; 1363 } 1364 1365 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 1366 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 1367 if (!skb) { 1368 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1369 dev_kfree_skb_any(skb_beacon); 1370 return -ENOMEM; 1371 } 1372 skb_put(skb, H2C_BCN_BASE_LEN); 1373 1374 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 1375 SET_BCN_UPD_MBSSID(skb->data, 0); 1376 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 1377 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 1378 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 1379 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 1380 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 1381 SET_BCN_UPD_RATE(skb->data, beacon_rate); 1382 1383 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 1384 dev_kfree_skb_any(skb_beacon); 1385 1386 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1387 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1388 H2C_FUNC_MAC_BCN_UPD, 0, 1, 1389 bcn_total_len); 1390 1391 ret = rtw89_h2c_tx(rtwdev, skb, false); 1392 if (ret) { 1393 rtw89_err(rtwdev, "failed to send h2c\n"); 1394 dev_kfree_skb_any(skb); 1395 return ret; 1396 } 1397 1398 return 0; 1399 } 1400 1401 #define H2C_ROLE_MAINTAIN_LEN 4 1402 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 1403 struct rtw89_vif *rtwvif, 1404 struct rtw89_sta 
*rtwsta, 1405 enum rtw89_upd_mode upd_mode) 1406 { 1407 struct sk_buff *skb; 1408 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1409 u8 self_role; 1410 int ret; 1411 1412 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 1413 if (rtwsta) 1414 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1415 else 1416 self_role = rtwvif->self_role; 1417 } else { 1418 self_role = rtwvif->self_role; 1419 } 1420 1421 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 1422 if (!skb) { 1423 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1424 return -ENOMEM; 1425 } 1426 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 1427 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 1428 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 1429 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 1430 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1431 1432 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1433 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1434 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 1435 H2C_ROLE_MAINTAIN_LEN); 1436 1437 ret = rtw89_h2c_tx(rtwdev, skb, false); 1438 if (ret) { 1439 rtw89_err(rtwdev, "failed to send h2c\n"); 1440 goto fail; 1441 } 1442 1443 return 0; 1444 fail: 1445 dev_kfree_skb_any(skb); 1446 1447 return ret; 1448 } 1449 1450 #define H2C_JOIN_INFO_LEN 4 1451 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1452 struct rtw89_sta *rtwsta, bool dis_conn) 1453 { 1454 struct sk_buff *skb; 1455 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1456 u8 self_role = rtwvif->self_role; 1457 u8 net_type = rtwvif->net_type; 1458 int ret; 1459 1460 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 1461 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1462 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type; 1463 } 1464 1465 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1466 if (!skb) { 1467 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1468 return -ENOMEM; 1469 } 1470 skb_put(skb, H2C_JOIN_INFO_LEN); 1471 SET_JOININFO_MACID(skb->data, mac_id); 1472 SET_JOININFO_OP(skb->data, dis_conn); 1473 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); 1474 SET_JOININFO_WMM(skb->data, rtwvif->wmm); 1475 SET_JOININFO_TGR(skb->data, rtwvif->trigger); 1476 SET_JOININFO_ISHESTA(skb->data, 0); 1477 SET_JOININFO_DLBW(skb->data, 0); 1478 SET_JOININFO_TF_MAC_PAD(skb->data, 0); 1479 SET_JOININFO_DL_T_PE(skb->data, 0); 1480 SET_JOININFO_PORT_ID(skb->data, rtwvif->port); 1481 SET_JOININFO_NET_TYPE(skb->data, net_type); 1482 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1483 SET_JOININFO_SELF_ROLE(skb->data, self_role); 1484 1485 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1486 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1487 H2C_FUNC_MAC_JOININFO, 0, 1, 1488 H2C_JOIN_INFO_LEN); 1489 1490 ret = rtw89_h2c_tx(rtwdev, skb, false); 1491 if (ret) { 1492 rtw89_err(rtwdev, "failed to send h2c\n"); 1493 goto fail; 1494 } 1495 1496 return 0; 1497 fail: 1498 dev_kfree_skb_any(skb); 1499 1500 return ret; 1501 } 1502 1503 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 1504 bool pause) 1505 { 1506 struct rtw89_fw_macid_pause_grp h2c = {{0}}; 1507 u8 len = sizeof(struct rtw89_fw_macid_pause_grp); 1508 struct sk_buff *skb; 1509 int ret; 1510 1511 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1512 if (!skb) { 1513 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1514 return -ENOMEM; 1515 } 1516 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); 1517 if (pause) 1518 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); 1519 
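	/* The bitmaps built above split MACIDs into 32-bit groups: @grp
	 * selects the group and @sh the bit inside it. mask_grp marks which
	 * MACID bit is valid, while pause_grp carries the requested pause
	 * state for it.
	 */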
skb_put_data(skb, &h2c, len); 1520 1521 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1522 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1523 H2C_FUNC_MAC_MACID_PAUSE, 1, 0, 1524 len); 1525 1526 ret = rtw89_h2c_tx(rtwdev, skb, false); 1527 if (ret) { 1528 rtw89_err(rtwdev, "failed to send h2c\n"); 1529 goto fail; 1530 } 1531 1532 return 0; 1533 fail: 1534 dev_kfree_skb_any(skb); 1535 1536 return ret; 1537 } 1538 1539 #define H2C_EDCA_LEN 12 1540 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1541 u8 ac, u32 val) 1542 { 1543 struct sk_buff *skb; 1544 int ret; 1545 1546 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 1547 if (!skb) { 1548 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 1549 return -ENOMEM; 1550 } 1551 skb_put(skb, H2C_EDCA_LEN); 1552 RTW89_SET_EDCA_SEL(skb->data, 0); 1553 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 1554 RTW89_SET_EDCA_WMM(skb->data, 0); 1555 RTW89_SET_EDCA_AC(skb->data, ac); 1556 RTW89_SET_EDCA_PARAM(skb->data, val); 1557 1558 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1559 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1560 H2C_FUNC_USR_EDCA, 0, 1, 1561 H2C_EDCA_LEN); 1562 1563 ret = rtw89_h2c_tx(rtwdev, skb, false); 1564 if (ret) { 1565 rtw89_err(rtwdev, "failed to send h2c\n"); 1566 goto fail; 1567 } 1568 1569 return 0; 1570 fail: 1571 dev_kfree_skb_any(skb); 1572 1573 return ret; 1574 } 1575 1576 #define H2C_TSF32_TOGL_LEN 4 1577 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1578 bool en) 1579 { 1580 struct sk_buff *skb; 1581 u16 early_us = en ? 2000 : 0; 1582 u8 *cmd; 1583 int ret; 1584 1585 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 1586 if (!skb) { 1587 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1588 return -ENOMEM; 1589 } 1590 skb_put(skb, H2C_TSF32_TOGL_LEN); 1591 cmd = skb->data; 1592 1593 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 1594 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 1595 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 1596 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 1597 1598 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1599 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1600 H2C_FUNC_TSF32_TOGL, 0, 0, 1601 H2C_TSF32_TOGL_LEN); 1602 1603 ret = rtw89_h2c_tx(rtwdev, skb, false); 1604 if (ret) { 1605 rtw89_err(rtwdev, "failed to send h2c\n"); 1606 goto fail; 1607 } 1608 1609 return 0; 1610 fail: 1611 dev_kfree_skb_any(skb); 1612 1613 return ret; 1614 } 1615 1616 #define H2C_OFLD_CFG_LEN 8 1617 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 1618 { 1619 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 1620 struct sk_buff *skb; 1621 int ret; 1622 1623 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 1624 if (!skb) { 1625 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 1626 return -ENOMEM; 1627 } 1628 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 1629 1630 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1631 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1632 H2C_FUNC_OFLD_CFG, 0, 1, 1633 H2C_OFLD_CFG_LEN); 1634 1635 ret = rtw89_h2c_tx(rtwdev, skb, false); 1636 if (ret) { 1637 rtw89_err(rtwdev, "failed to send h2c\n"); 1638 goto fail; 1639 } 1640 1641 return 0; 1642 fail: 1643 dev_kfree_skb_any(skb); 1644 1645 return ret; 1646 } 1647 1648 #define H2C_RA_LEN 16 1649 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 1650 { 1651 struct sk_buff *skb; 1652 u8 *cmd; 1653 int ret; 1654 1655 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_RA_LEN); 1656 if (!skb) { 1657 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1658 return -ENOMEM; 1659 } 1660 skb_put(skb, H2C_RA_LEN); 1661 cmd = skb->data; 1662 rtw89_debug(rtwdev, RTW89_DBG_RA, 1663 "ra cmd msk: %llx ", ra->ra_mask); 1664 1665 RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); 1666 RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); 1667 RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); 1668 RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); 1669 RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); 1670 RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); 1671 RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); 1672 RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); 1673 RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap); 1674 RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); 1675 RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); 1676 RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); 1677 RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); 1678 RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); 1679 RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); 1680 RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); 1681 RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); 1682 RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); 1683 RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); 1684 RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en); 1685 RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf); 1686 1687 if (csi) { 1688 RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); 1689 RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); 1690 RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); 1691 RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); 1692 RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); 1693 RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); 1694 RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); 1695 RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); 1696 RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); 1697 } 1698 1699 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1700 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 1701 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 1702 H2C_RA_LEN); 1703 1704 ret = rtw89_h2c_tx(rtwdev, skb, false); 1705 if (ret) { 1706 rtw89_err(rtwdev, "failed to send h2c\n"); 1707 goto fail; 1708 } 1709 1710 return 0; 1711 fail: 1712 dev_kfree_skb_any(skb); 1713 1714 return ret; 1715 } 1716 1717 #define H2C_LEN_CXDRVHDR 2 1718 #define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR) 1719 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 1720 { 1721 struct rtw89_btc *btc = &rtwdev->btc; 1722 struct rtw89_btc_dm *dm = &btc->dm; 1723 struct rtw89_btc_init_info *init_info = &dm->init_info; 1724 struct rtw89_btc_module *module = &init_info->module; 1725 struct rtw89_btc_ant_info *ant = &module->ant; 1726 struct sk_buff *skb; 1727 u8 *cmd; 1728 int ret; 1729 1730 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT); 1731 if (!skb) { 1732 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 1733 return -ENOMEM; 1734 } 1735 skb_put(skb, H2C_LEN_CXDRVINFO_INIT); 1736 cmd = skb->data; 1737 1738 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT); 1739 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR); 1740 1741 RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type); 1742 RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num); 1743 RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation); 1744 RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos); 1745 RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, 
ant->diversity); 1746 1747 RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type); 1748 RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv); 1749 RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo); 1750 RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos); 1751 RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type); 1752 1753 RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch); 1754 RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only); 1755 RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok); 1756 RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en); 1757 RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other); 1758 RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only); 1759 1760 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1761 H2C_CAT_OUTSRC, BTFC_SET, 1762 SET_DRV_INFO, 0, 0, 1763 H2C_LEN_CXDRVINFO_INIT); 1764 1765 ret = rtw89_h2c_tx(rtwdev, skb, false); 1766 if (ret) { 1767 rtw89_err(rtwdev, "failed to send h2c\n"); 1768 goto fail; 1769 } 1770 1771 return 0; 1772 fail: 1773 dev_kfree_skb_any(skb); 1774 1775 return ret; 1776 } 1777 1778 #define PORT_DATA_OFFSET 4 1779 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 1780 #define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR) 1781 #define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \ 1782 H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \ 1783 H2C_LEN_CXDRVHDR) 1784 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 1785 { 1786 struct rtw89_btc *btc = &rtwdev->btc; 1787 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 1788 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 1789 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 1790 struct rtw89_btc_wl_active_role *active = role_info->active_role; 1791 struct sk_buff *skb; 1792 u8 offset = 0; 1793 u8 *cmd; 1794 int ret; 1795 int i; 1796 1797 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE); 1798 if (!skb) { 1799 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 1800 return -ENOMEM; 1801 } 1802 skb_put(skb, H2C_LEN_CXDRVINFO_ROLE); 1803 cmd = skb->data; 1804 1805 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 1806 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR); 1807 1808 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 1809 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 1810 1811 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 1812 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 1813 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 1814 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 1815 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 1816 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 1817 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 1818 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 1819 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 1820 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 1821 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 1822 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 1823 1824 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 1825 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 1826 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 1827 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 1828 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 1829 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 1830 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, 
active->client_ps, i, offset); 1831 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 1832 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 1833 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 1834 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 1835 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 1836 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 1837 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 1838 } 1839 1840 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1841 H2C_CAT_OUTSRC, BTFC_SET, 1842 SET_DRV_INFO, 0, 0, 1843 H2C_LEN_CXDRVINFO_ROLE); 1844 1845 ret = rtw89_h2c_tx(rtwdev, skb, false); 1846 if (ret) { 1847 rtw89_err(rtwdev, "failed to send h2c\n"); 1848 goto fail; 1849 } 1850 1851 return 0; 1852 fail: 1853 dev_kfree_skb_any(skb); 1854 1855 return ret; 1856 } 1857 1858 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 1859 { 1860 struct rtw89_btc *btc = &rtwdev->btc; 1861 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 1862 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 1863 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 1864 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 1865 struct sk_buff *skb; 1866 u8 *cmd, offset; 1867 int ret; 1868 int i; 1869 1870 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1); 1871 if (!skb) { 1872 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 1873 return -ENOMEM; 1874 } 1875 skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1); 1876 cmd = skb->data; 1877 1878 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 1879 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR); 1880 1881 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 1882 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 1883 1884 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 1885 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 1886 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 1887 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 1888 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 1889 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 1890 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 1891 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 1892 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 1893 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 1894 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 1895 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 1896 1897 offset = PORT_DATA_OFFSET; 1898 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 1899 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 1900 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 1901 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 1902 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 1903 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 1904 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 1905 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 1906 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 1907 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 1908 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 1909 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 1910 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 1911 
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
	}

	/* mrole/DBCC summary fields are carried in the fixed DBCC tail of the v1 role payload */
	offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_ROLE_V1);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
	if (chip->chip_id == RTL8852A)
		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_CTRL);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET, 2015 SET_DRV_INFO, 0, 0, 2016 H2C_LEN_CXDRVINFO_RFK); 2017 2018 ret = rtw89_h2c_tx(rtwdev, skb, false); 2019 if (ret) { 2020 rtw89_err(rtwdev, "failed to send h2c\n"); 2021 goto fail; 2022 } 2023 2024 return 0; 2025 fail: 2026 dev_kfree_skb_any(skb); 2027 2028 return ret; 2029 } 2030 2031 #define H2C_LEN_PKT_OFLD 4 2032 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 2033 { 2034 struct sk_buff *skb; 2035 u8 *cmd; 2036 int ret; 2037 2038 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 2039 if (!skb) { 2040 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2041 return -ENOMEM; 2042 } 2043 skb_put(skb, H2C_LEN_PKT_OFLD); 2044 cmd = skb->data; 2045 2046 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 2047 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 2048 2049 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2050 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2051 H2C_FUNC_PACKET_OFLD, 1, 1, 2052 H2C_LEN_PKT_OFLD); 2053 2054 ret = rtw89_h2c_tx(rtwdev, skb, false); 2055 if (ret) { 2056 rtw89_err(rtwdev, "failed to send h2c\n"); 2057 goto fail; 2058 } 2059 2060 return 0; 2061 fail: 2062 dev_kfree_skb_any(skb); 2063 2064 return ret; 2065 } 2066 2067 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 2068 struct sk_buff *skb_ofld) 2069 { 2070 struct sk_buff *skb; 2071 u8 *cmd; 2072 u8 alloc_id; 2073 int ret; 2074 2075 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2076 RTW89_MAX_PKT_OFLD_NUM); 2077 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2078 return -ENOSPC; 2079 2080 *id = alloc_id; 2081 2082 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2083 if (!skb) { 2084 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2085 return -ENOMEM; 2086 } 2087 skb_put(skb, H2C_LEN_PKT_OFLD); 2088 cmd = skb->data; 2089 2090 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2091 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2092 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 2093 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 2094 2095 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2096 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2097 H2C_FUNC_PACKET_OFLD, 1, 1, 2098 H2C_LEN_PKT_OFLD + skb_ofld->len); 2099 2100 ret = rtw89_h2c_tx(rtwdev, skb, false); 2101 if (ret) { 2102 rtw89_err(rtwdev, "failed to send h2c\n"); 2103 goto fail; 2104 } 2105 2106 return 0; 2107 fail: 2108 dev_kfree_skb_any(skb); 2109 2110 return ret; 2111 } 2112 2113 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 2114 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 2115 struct list_head *chan_list) 2116 { 2117 struct rtw89_mac_chinfo *ch_info; 2118 struct sk_buff *skb; 2119 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 2120 u8 *cmd; 2121 int ret; 2122 2123 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 2124 if (!skb) { 2125 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 2126 return -ENOMEM; 2127 } 2128 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 2129 cmd = skb->data; 2130 2131 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 2132 /* in unit of 4 bytes */ 2133 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 2134 2135 list_for_each_entry(ch_info, chan_list, list) { 2136 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 2137 2138 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 2139 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 2140 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 2141 
RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 2142 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 2143 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 2144 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 2145 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 2146 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 2147 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 2148 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 2149 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 2150 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 2151 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 2152 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 2153 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 2154 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 2155 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 2156 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 2157 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 2158 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 2159 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 2160 } 2161 2162 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2163 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2164 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 2165 2166 ret = rtw89_h2c_tx(rtwdev, skb, false); 2167 if (ret) { 2168 rtw89_err(rtwdev, "failed to send h2c\n"); 2169 goto fail; 2170 } 2171 2172 return 0; 2173 fail: 2174 dev_kfree_skb_any(skb); 2175 2176 return ret; 2177 } 2178 2179 #define H2C_LEN_SCAN_OFFLOAD 28 2180 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 2181 struct rtw89_scan_option *option, 2182 struct rtw89_vif *rtwvif) 2183 { 2184 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2185 struct sk_buff *skb; 2186 u8 *cmd; 2187 int ret; 2188 2189 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD); 2190 if (!skb) { 2191 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 2192 return -ENOMEM; 2193 } 2194 skb_put(skb, H2C_LEN_SCAN_OFFLOAD); 2195 cmd = skb->data; 2196 2197 RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id); 2198 RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port); 2199 RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0); 2200 RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable); 2201 RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true); 2202 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode); 2203 RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE); 2204 RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE); 2205 if (option->target_ch_mode) { 2206 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw); 2207 RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd, 2208 scan_info->op_pri_ch); 2209 RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd, 2210 scan_info->op_chan); 2211 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd, 2212 scan_info->op_band); 2213 } 2214 2215 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2216 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2217 H2C_FUNC_SCANOFLD, 1, 1, 2218 H2C_LEN_SCAN_OFFLOAD); 2219 2220 ret = rtw89_h2c_tx(rtwdev, skb, false); 2221 if (ret) { 2222 rtw89_err(rtwdev, "failed to send h2c\n"); 2223 goto fail; 2224 } 2225 2226 return 0; 2227 fail: 2228 dev_kfree_skb_any(skb); 2229 2230 return ret; 2231 } 2232 2233 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 2234 struct rtw89_fw_h2c_rf_reg_info *info, 2235 u16 len, u8 page) 2236 { 2237 struct sk_buff *skb; 2238 u8 class = info->rf_path == RF_PATH_A ? 
			 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
		return -ENOMEM;
	}
	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, class, page, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	struct rtw89_fw_h2c_rf_get_mccch *mccch;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
		return -ENOMEM;
	}
	skb_put(skb, sizeof(*mccch));
	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;

	mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
	mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
	mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
	mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
	mccch->current_channel = cpu_to_le32(chan->channel);
	mccch->current_band_type = cpu_to_le32(chan->band_type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
			      sizeof(*mccch));

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);

int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
			      bool rack, bool dack)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c;

	lockdep_assert_held(&rtwdev->mutex);

	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
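		/* Replay each queued early H2C; the entries stay on
		 * early_h2c_list and are freed only by
		 * rtw89_fw_free_all_early_h2c().
		 */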
rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 2372 } 2373 } 2374 2375 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 2376 { 2377 struct rtw89_early_h2c *early_h2c, *tmp; 2378 2379 mutex_lock(&rtwdev->mutex); 2380 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 2381 list_del(&early_h2c->list); 2382 kfree(early_h2c->h2c); 2383 kfree(early_h2c); 2384 } 2385 mutex_unlock(&rtwdev->mutex); 2386 } 2387 2388 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 2389 { 2390 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2391 2392 attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); 2393 attr->class = RTW89_GET_C2H_CLASS(c2h->data); 2394 attr->func = RTW89_GET_C2H_FUNC(c2h->data); 2395 attr->len = RTW89_GET_C2H_LEN(c2h->data); 2396 } 2397 2398 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 2399 struct sk_buff *c2h) 2400 { 2401 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2402 u8 category = attr->category; 2403 u8 class = attr->class; 2404 u8 func = attr->func; 2405 2406 switch (category) { 2407 default: 2408 return false; 2409 case RTW89_C2H_CAT_MAC: 2410 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 2411 } 2412 } 2413 2414 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 2415 { 2416 rtw89_fw_c2h_parse_attr(c2h); 2417 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 2418 goto enqueue; 2419 2420 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 2421 dev_kfree_skb_any(c2h); 2422 return; 2423 2424 enqueue: 2425 skb_queue_tail(&rtwdev->c2h_queue, c2h); 2426 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 2427 } 2428 2429 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 2430 struct sk_buff *skb) 2431 { 2432 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 2433 u8 category = attr->category; 2434 u8 class = attr->class; 2435 u8 func = attr->func; 2436 u16 len = attr->len; 2437 bool dump = true; 2438 2439 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2440 return; 2441 2442 switch (category) { 2443 case RTW89_C2H_CAT_TEST: 2444 break; 2445 case RTW89_C2H_CAT_MAC: 2446 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 2447 if (class == RTW89_MAC_C2H_CLASS_INFO && 2448 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 2449 dump = false; 2450 break; 2451 case RTW89_C2H_CAT_OUTSRC: 2452 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 2453 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 2454 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 2455 else 2456 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 2457 break; 2458 } 2459 2460 if (dump) 2461 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 2462 } 2463 2464 void rtw89_fw_c2h_work(struct work_struct *work) 2465 { 2466 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2467 c2h_work); 2468 struct sk_buff *skb, *tmp; 2469 2470 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 2471 skb_unlink(skb, &rtwdev->c2h_queue); 2472 mutex_lock(&rtwdev->mutex); 2473 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 2474 mutex_unlock(&rtwdev->mutex); 2475 dev_kfree_skb_any(skb); 2476 } 2477 } 2478 2479 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 2480 struct rtw89_mac_h2c_info *info) 2481 { 2482 const struct rtw89_chip_info *chip = rtwdev->chip; 2483 const u32 *h2c_reg = chip->h2c_regs; 2484 u8 i, val, len; 2485 int ret; 2486 2487 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 2488 rtwdev, chip->h2c_ctrl_reg); 2489 if (ret) { 2490 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 2491 return 
ret; 2492 } 2493 2494 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 2495 sizeof(info->h2creg[0])); 2496 2497 RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id); 2498 RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len); 2499 for (i = 0; i < RTW89_H2CREG_MAX; i++) 2500 rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]); 2501 2502 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 2503 2504 return 0; 2505 } 2506 2507 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 2508 struct rtw89_mac_c2h_info *info) 2509 { 2510 const struct rtw89_chip_info *chip = rtwdev->chip; 2511 const u32 *c2h_reg = chip->c2h_regs; 2512 u32 ret; 2513 u8 i, val; 2514 2515 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 2516 2517 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 2518 RTW89_C2H_TIMEOUT, false, rtwdev, 2519 chip->c2h_ctrl_reg); 2520 if (ret) { 2521 rtw89_warn(rtwdev, "c2h reg timeout\n"); 2522 return ret; 2523 } 2524 2525 for (i = 0; i < RTW89_C2HREG_MAX; i++) 2526 info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 2527 2528 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 2529 2530 info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg); 2531 info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) - 2532 RTW89_C2HREG_HDR_LEN; 2533 2534 return 0; 2535 } 2536 2537 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 2538 struct rtw89_mac_h2c_info *h2c_info, 2539 struct rtw89_mac_c2h_info *c2h_info) 2540 { 2541 u32 ret; 2542 2543 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 2544 lockdep_assert_held(&rtwdev->mutex); 2545 2546 if (!h2c_info && !c2h_info) 2547 return -EINVAL; 2548 2549 if (!h2c_info) 2550 goto recv_c2h; 2551 2552 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 2553 if (ret) 2554 return ret; 2555 2556 recv_c2h: 2557 if (!c2h_info) 2558 return 0; 2559 2560 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 2561 if (ret) 2562 return ret; 2563 2564 return 0; 2565 } 2566 2567 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 2568 { 2569 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 2570 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 2571 return; 2572 } 2573 2574 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 2575 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 2576 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 2577 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 2578 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 2579 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 2580 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 2581 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 2582 2583 rtw89_fw_prog_cnt_dump(rtwdev); 2584 } 2585 2586 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 2587 { 2588 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 2589 struct rtw89_pktofld_info *info, *tmp; 2590 u8 idx; 2591 2592 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 2593 if (!(rtwdev->chip->support_bands & BIT(idx))) 2594 continue; 2595 2596 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 2597 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2598 rtw89_core_release_bit_map(rtwdev->pkt_offload, 2599 info->id); 2600 list_del(&info->list); 2601 kfree(info); 2602 } 2603 } 2604 } 2605 2606 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 2607 struct rtw89_vif *rtwvif, 2608 struct sk_buff *skb) 2609 { 2610 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2611 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 2612 
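	/* For each supported band: clone the probe request template, append
	 * the band-specific and common scan IEs, then register the resulting
	 * frame as a packet-offload entry tracked on scan_info->pkt_list[].
	 */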
struct rtw89_pktofld_info *info; 2613 struct sk_buff *new; 2614 int ret = 0; 2615 u8 band; 2616 2617 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 2618 if (!(rtwdev->chip->support_bands & BIT(band))) 2619 continue; 2620 2621 new = skb_copy(skb, GFP_KERNEL); 2622 if (!new) { 2623 ret = -ENOMEM; 2624 goto out; 2625 } 2626 skb_put_data(new, ies->ies[band], ies->len[band]); 2627 skb_put_data(new, ies->common_ies, ies->common_ie_len); 2628 2629 info = kzalloc(sizeof(*info), GFP_KERNEL); 2630 if (!info) { 2631 ret = -ENOMEM; 2632 kfree_skb(new); 2633 goto out; 2634 } 2635 2636 list_add_tail(&info->list, &scan_info->pkt_list[band]); 2637 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 2638 if (ret) 2639 goto out; 2640 2641 kfree_skb(new); 2642 } 2643 out: 2644 return ret; 2645 } 2646 2647 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 2648 struct rtw89_vif *rtwvif) 2649 { 2650 struct cfg80211_scan_request *req = rtwvif->scan_req; 2651 struct sk_buff *skb; 2652 u8 num = req->n_ssids, i; 2653 int ret; 2654 2655 for (i = 0; i < num; i++) { 2656 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 2657 req->ssids[i].ssid, 2658 req->ssids[i].ssid_len, 2659 req->ie_len); 2660 if (!skb) 2661 return -ENOMEM; 2662 2663 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb); 2664 kfree_skb(skb); 2665 2666 if (ret) 2667 return ret; 2668 } 2669 2670 return 0; 2671 } 2672 2673 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 2674 int ssid_num, 2675 struct rtw89_mac_chinfo *ch_info) 2676 { 2677 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2678 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 2679 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2680 struct cfg80211_scan_request *req = rtwvif->scan_req; 2681 struct rtw89_pktofld_info *info; 2682 u8 band, probe_count = 0; 2683 2684 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 2685 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 2686 ch_info->bw = RTW89_SCAN_WIDTH; 2687 ch_info->tx_pkt = true; 2688 ch_info->cfg_tx_pwr = false; 2689 ch_info->tx_pwr_idx = 0; 2690 ch_info->tx_null = false; 2691 ch_info->pause_data = false; 2692 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 2693 2694 if (ssid_num) { 2695 ch_info->num_pkt = ssid_num; 2696 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 2697 2698 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 2699 ch_info->pkt_id[probe_count] = info->id; 2700 if (++probe_count >= ssid_num) 2701 break; 2702 } 2703 if (probe_count != ssid_num) 2704 rtw89_err(rtwdev, "SSID num differs from list len\n"); 2705 } 2706 2707 if (ch_info->ch_band == RTW89_BAND_6G) { 2708 if (ssid_num == 1 && req->ssids[0].ssid_len == 0) { 2709 ch_info->tx_pkt = false; 2710 if (!req->duration_mandatory) 2711 ch_info->period -= RTW89_DWELL_TIME; 2712 } 2713 } 2714 2715 switch (chan_type) { 2716 case RTW89_CHAN_OPERATE: 2717 ch_info->central_ch = scan_info->op_chan; 2718 ch_info->pri_ch = scan_info->op_pri_ch; 2719 ch_info->ch_band = scan_info->op_band; 2720 ch_info->bw = scan_info->op_bw; 2721 ch_info->tx_null = true; 2722 ch_info->num_pkt = 0; 2723 break; 2724 case RTW89_CHAN_DFS: 2725 if (ch_info->ch_band != RTW89_BAND_6G) 2726 ch_info->period = max_t(u8, ch_info->period, 2727 RTW89_DFS_CHAN_TIME); 2728 ch_info->dwell_time = RTW89_DWELL_TIME; 2729 break; 2730 case RTW89_CHAN_ACTIVE: 2731 break; 2732 default: 2733 rtw89_err(rtwdev, "Channel type out of bound\n"); 2734 } 2735 } 2736 2737 static int 
rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 2738 struct rtw89_vif *rtwvif) 2739 { 2740 struct cfg80211_scan_request *req = rtwvif->scan_req; 2741 struct rtw89_mac_chinfo *ch_info, *tmp; 2742 struct ieee80211_channel *channel; 2743 struct list_head chan_list; 2744 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 2745 int list_len, off_chan_time = 0; 2746 enum rtw89_chan_type type; 2747 int ret = 0; 2748 u32 idx; 2749 2750 INIT_LIST_HEAD(&chan_list); 2751 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 2752 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 2753 idx++, list_len++) { 2754 channel = req->channels[idx]; 2755 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 2756 if (!ch_info) { 2757 ret = -ENOMEM; 2758 goto out; 2759 } 2760 2761 if (req->duration_mandatory) 2762 ch_info->period = req->duration; 2763 else if (channel->band == NL80211_BAND_6GHZ) 2764 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME; 2765 else 2766 ch_info->period = RTW89_CHANNEL_TIME; 2767 2768 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 2769 ch_info->central_ch = channel->hw_value; 2770 ch_info->pri_ch = channel->hw_value; 2771 ch_info->rand_seq_num = random_seq; 2772 2773 if (channel->flags & 2774 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 2775 type = RTW89_CHAN_DFS; 2776 else 2777 type = RTW89_CHAN_ACTIVE; 2778 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 2779 2780 if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK && 2781 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 2782 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2783 if (!tmp) { 2784 ret = -ENOMEM; 2785 kfree(ch_info); 2786 goto out; 2787 } 2788 2789 type = RTW89_CHAN_OPERATE; 2790 tmp->period = req->duration_mandatory ? 2791 req->duration : RTW89_CHANNEL_TIME; 2792 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 2793 list_add_tail(&tmp->list, &chan_list); 2794 off_chan_time = 0; 2795 list_len++; 2796 } 2797 list_add_tail(&ch_info->list, &chan_list); 2798 off_chan_time += ch_info->period; 2799 } 2800 rtwdev->scan_info.last_chan_idx = idx; 2801 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 2802 2803 out: 2804 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 2805 list_del(&ch_info->list); 2806 kfree(ch_info); 2807 } 2808 2809 return ret; 2810 } 2811 2812 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 2813 struct rtw89_vif *rtwvif) 2814 { 2815 int ret; 2816 2817 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 2818 if (ret) { 2819 rtw89_err(rtwdev, "Update probe request failed\n"); 2820 goto out; 2821 } 2822 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif); 2823 out: 2824 return ret; 2825 } 2826 2827 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2828 struct ieee80211_scan_request *scan_req) 2829 { 2830 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2831 struct cfg80211_scan_request *req = &scan_req->req; 2832 u32 rx_fltr = rtwdev->hal.rx_fltr; 2833 u8 mac_addr[ETH_ALEN]; 2834 2835 rtwdev->scan_info.scanning_vif = vif; 2836 rtwdev->scan_info.last_chan_idx = 0; 2837 rtwvif->scan_ies = &scan_req->ies; 2838 rtwvif->scan_req = req; 2839 ieee80211_stop_queues(rtwdev->hw); 2840 2841 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 2842 get_random_mask_addr(mac_addr, req->mac_addr, 2843 req->mac_addr_mask); 2844 else 2845 ether_addr_copy(mac_addr, vif->addr); 2846 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 2847 2848 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 2849 rx_fltr &= ~B_AX_A_BC; 
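	/* rx_fltr is a local copy of rtwdev->hal.rx_fltr; the original
	 * filter setting is restored by rtw89_hw_scan_complete().
	 */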
2850 rx_fltr &= ~B_AX_A_A1_MATCH; 2851 rtw89_write32_mask(rtwdev, 2852 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 2853 B_AX_RX_FLTR_CFG_MASK, 2854 rx_fltr); 2855 } 2856 2857 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2858 bool aborted) 2859 { 2860 struct cfg80211_scan_info info = { 2861 .aborted = aborted, 2862 }; 2863 struct rtw89_vif *rtwvif; 2864 2865 if (!vif) 2866 return; 2867 2868 rtw89_write32_mask(rtwdev, 2869 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 2870 B_AX_RX_FLTR_CFG_MASK, 2871 rtwdev->hal.rx_fltr); 2872 2873 rtw89_core_scan_complete(rtwdev, vif, true); 2874 ieee80211_scan_completed(rtwdev->hw, &info); 2875 ieee80211_wake_queues(rtwdev->hw); 2876 2877 rtw89_release_pkt_list(rtwdev); 2878 rtwvif = (struct rtw89_vif *)vif->drv_priv; 2879 rtwvif->scan_req = NULL; 2880 rtwvif->scan_ies = NULL; 2881 rtwdev->scan_info.last_chan_idx = 0; 2882 rtwdev->scan_info.scanning_vif = NULL; 2883 2884 if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK) 2885 rtw89_store_op_chan(rtwdev, false); 2886 rtw89_set_channel(rtwdev); 2887 } 2888 2889 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 2890 { 2891 rtw89_hw_scan_offload(rtwdev, vif, false); 2892 rtw89_hw_scan_complete(rtwdev, vif, true); 2893 } 2894 2895 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2896 bool enable) 2897 { 2898 struct rtw89_scan_option opt = {0}; 2899 struct rtw89_vif *rtwvif; 2900 int ret = 0; 2901 2902 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 2903 if (!rtwvif) 2904 return -EINVAL; 2905 2906 opt.enable = enable; 2907 opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; 2908 if (enable) { 2909 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif); 2910 if (ret) 2911 goto out; 2912 } 2913 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 2914 out: 2915 return ret; 2916 } 2917 2918 void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup) 2919 { 2920 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2921 const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2922 struct rtw89_chan new; 2923 2924 if (backup) { 2925 scan_info->op_pri_ch = cur->primary_channel; 2926 scan_info->op_chan = cur->channel; 2927 scan_info->op_bw = cur->band_width; 2928 scan_info->op_band = cur->band_type; 2929 } else { 2930 rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch, 2931 scan_info->op_band, scan_info->op_bw); 2932 rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new); 2933 } 2934 } 2935 2936 #define H2C_FW_CPU_EXCEPTION_LEN 4 2937 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 2938 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 2939 { 2940 struct sk_buff *skb; 2941 int ret; 2942 2943 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 2944 if (!skb) { 2945 rtw89_err(rtwdev, 2946 "failed to alloc skb for fw cpu exception\n"); 2947 return -ENOMEM; 2948 } 2949 2950 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 2951 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 2952 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 2953 2954 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2955 H2C_CAT_TEST, 2956 H2C_CL_FW_STATUS_TEST, 2957 H2C_FUNC_CPU_EXCEPTION, 0, 0, 2958 H2C_FW_CPU_EXCEPTION_LEN); 2959 2960 ret = rtw89_h2c_tx(rtwdev, skb, false); 2961 if (ret) { 2962 rtw89_err(rtwdev, "failed to send h2c\n"); 2963 goto fail; 2964 } 2965 2966 return 0; 2967 2968 fail: 2969 dev_kfree_skb_any(skb); 2970 return ret; 2971 } 2972 2973 #define H2C_PKT_DROP_LEN 
24 2974 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 2975 const struct rtw89_pkt_drop_params *params) 2976 { 2977 struct sk_buff *skb; 2978 int ret; 2979 2980 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 2981 if (!skb) { 2982 rtw89_err(rtwdev, 2983 "failed to alloc skb for packet drop\n"); 2984 return -ENOMEM; 2985 } 2986 2987 switch (params->sel) { 2988 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 2989 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 2990 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 2991 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 2992 case RTW89_PKT_DROP_SEL_BAND_ONCE: 2993 break; 2994 default: 2995 rtw89_debug(rtwdev, RTW89_DBG_FW, 2996 "H2C of pkt drop might not fully support sel: %d yet\n", 2997 params->sel); 2998 break; 2999 } 3000 3001 skb_put(skb, H2C_PKT_DROP_LEN); 3002 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 3003 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 3004 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 3005 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 3006 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 3007 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 3008 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 3009 params->macid_band_sel[0]); 3010 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 3011 params->macid_band_sel[1]); 3012 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 3013 params->macid_band_sel[2]); 3014 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 3015 params->macid_band_sel[3]); 3016 3017 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3018 H2C_CAT_MAC, 3019 H2C_CL_MAC_FW_OFLD, 3020 H2C_FUNC_PKT_DROP, 0, 0, 3021 H2C_PKT_DROP_LEN); 3022 3023 ret = rtw89_h2c_tx(rtwdev, skb, false); 3024 if (ret) { 3025 rtw89_err(rtwdev, "failed to send h2c\n"); 3026 goto fail; 3027 } 3028 3029 return 0; 3030 3031 fail: 3032 dev_kfree_skb_any(skb); 3033 return ret; 3034 } 3035 3036 #define H2C_KEEP_ALIVE_LEN 4 3037 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3038 bool enable) 3039 { 3040 struct sk_buff *skb; 3041 u8 pkt_id = 0; 3042 int ret; 3043 3044 if (enable) { 3045 ret = rtw89_fw_h2c_add_wow_fw_ofld(rtwdev, rtwvif, 3046 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id); 3047 if (ret) 3048 return -EPERM; 3049 } 3050 3051 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 3052 if (!skb) { 3053 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3054 return -ENOMEM; 3055 } 3056 3057 skb_put(skb, H2C_KEEP_ALIVE_LEN); 3058 3059 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 3060 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 3061 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 3062 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 3063 3064 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3065 H2C_CAT_MAC, 3066 H2C_CL_MAC_WOW, 3067 H2C_FUNC_KEEP_ALIVE, 0, 1, 3068 H2C_KEEP_ALIVE_LEN); 3069 3070 ret = rtw89_h2c_tx(rtwdev, skb, false); 3071 if (ret) { 3072 rtw89_err(rtwdev, "failed to send h2c\n"); 3073 goto fail; 3074 } 3075 3076 return 0; 3077 3078 fail: 3079 dev_kfree_skb_any(skb); 3080 3081 return ret; 3082 } 3083 3084 #define H2C_DISCONNECT_DETECT_LEN 8 3085 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 3086 struct rtw89_vif *rtwvif, bool enable) 3087 { 3088 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3089 struct sk_buff *skb; 3090 u8 macid = rtwvif->mac_id; 3091 int ret; 3092 3093 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 3094 if (!skb) { 3095 
		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);

	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
			      H2C_DISCONNECT_DETECT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_GLOBAL_LEN 8
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			    bool enable)
{
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WOW_GLOBAL_LEN);

	RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
	RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_GLOBAL, 0, 1,
			      H2C_WOW_GLOBAL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WAKEUP_CTRL_LEN 4
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WAKEUP_CTRL_LEN);

	if (rtw_wow->pattern_cnt)
		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);

	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
			      H2C_WAKEUP_CTRL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_CAM_UPD_LEN 24
int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
			    struct rtw89_wow_cam_info *cam_info)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for keep 
alive\n"); 3225 return -ENOMEM; 3226 } 3227 3228 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 3229 3230 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 3231 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 3232 if (cam_info->valid) { 3233 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 3234 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 3235 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 3236 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 3237 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 3238 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 3239 cam_info->negative_pattern_match); 3240 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 3241 cam_info->skip_mac_hdr); 3242 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 3243 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 3244 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 3245 } 3246 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 3247 3248 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3249 H2C_CAT_MAC, 3250 H2C_CL_MAC_WOW, 3251 H2C_FUNC_WOW_CAM_UPD, 0, 1, 3252 H2C_WOW_CAM_UPD_LEN); 3253 3254 ret = rtw89_h2c_tx(rtwdev, skb, false); 3255 if (ret) { 3256 rtw89_err(rtwdev, "failed to send h2c\n"); 3257 goto fail; 3258 } 3259 3260 return 0; 3261 fail: 3262 dev_kfree_skb_any(skb); 3263 3264 return ret; 3265 } 3266 3267 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 3268 struct rtw89_wait_info *wait, unsigned int cond) 3269 { 3270 int ret; 3271 3272 ret = rtw89_h2c_tx(rtwdev, skb, false); 3273 if (ret) { 3274 rtw89_err(rtwdev, "failed to send h2c\n"); 3275 dev_kfree_skb_any(skb); 3276 return -EBUSY; 3277 } 3278 3279 return rtw89_wait_for_cond(wait, cond); 3280 } 3281 3282 #define H2C_ADD_MCC_LEN 16 3283 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 3284 const struct rtw89_fw_mcc_add_req *p) 3285 { 3286 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3287 struct sk_buff *skb; 3288 unsigned int cond; 3289 3290 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 3291 if (!skb) { 3292 rtw89_err(rtwdev, 3293 "failed to alloc skb for add mcc\n"); 3294 return -ENOMEM; 3295 } 3296 3297 skb_put(skb, H2C_ADD_MCC_LEN); 3298 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 3299 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 3300 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 3301 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 3302 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 3303 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 3304 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 3305 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 3306 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 3307 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 3308 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 3309 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 3310 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 3311 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 3312 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 3313 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 3314 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 3315 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 3316 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 3317 
RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 3318 3319 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3320 H2C_CAT_MAC, 3321 H2C_CL_MCC, 3322 H2C_FUNC_ADD_MCC, 0, 0, 3323 H2C_ADD_MCC_LEN); 3324 3325 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 3326 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3327 } 3328 3329 #define H2C_START_MCC_LEN 12 3330 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 3331 const struct rtw89_fw_mcc_start_req *p) 3332 { 3333 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3334 struct sk_buff *skb; 3335 unsigned int cond; 3336 3337 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 3338 if (!skb) { 3339 rtw89_err(rtwdev, 3340 "failed to alloc skb for start mcc\n"); 3341 return -ENOMEM; 3342 } 3343 3344 skb_put(skb, H2C_START_MCC_LEN); 3345 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 3346 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 3347 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 3348 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 3349 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 3350 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 3351 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 3352 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 3353 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 3354 3355 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3356 H2C_CAT_MAC, 3357 H2C_CL_MCC, 3358 H2C_FUNC_START_MCC, 0, 0, 3359 H2C_START_MCC_LEN); 3360 3361 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 3362 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3363 } 3364 3365 #define H2C_STOP_MCC_LEN 4 3366 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 3367 bool prev_groups) 3368 { 3369 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3370 struct sk_buff *skb; 3371 unsigned int cond; 3372 3373 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 3374 if (!skb) { 3375 rtw89_err(rtwdev, 3376 "failed to alloc skb for stop mcc\n"); 3377 return -ENOMEM; 3378 } 3379 3380 skb_put(skb, H2C_STOP_MCC_LEN); 3381 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 3382 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 3383 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 3384 3385 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3386 H2C_CAT_MAC, 3387 H2C_CL_MCC, 3388 H2C_FUNC_STOP_MCC, 0, 0, 3389 H2C_STOP_MCC_LEN); 3390 3391 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 3392 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3393 } 3394 3395 #define H2C_DEL_MCC_GROUP_LEN 4 3396 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 3397 bool prev_groups) 3398 { 3399 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3400 struct sk_buff *skb; 3401 unsigned int cond; 3402 3403 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 3404 if (!skb) { 3405 rtw89_err(rtwdev, 3406 "failed to alloc skb for del mcc group\n"); 3407 return -ENOMEM; 3408 } 3409 3410 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 3411 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 3412 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 3413 3414 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3415 H2C_CAT_MAC, 3416 H2C_CL_MCC, 3417 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 3418 H2C_DEL_MCC_GROUP_LEN); 3419 3420 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 3421 return 
rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3422 } 3423 3424 #define H2C_RESET_MCC_GROUP_LEN 4 3425 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 3426 { 3427 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3428 struct sk_buff *skb; 3429 unsigned int cond; 3430 3431 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 3432 if (!skb) { 3433 rtw89_err(rtwdev, 3434 "failed to alloc skb for reset mcc group\n"); 3435 return -ENOMEM; 3436 } 3437 3438 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 3439 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 3440 3441 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3442 H2C_CAT_MAC, 3443 H2C_CL_MCC, 3444 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 3445 H2C_RESET_MCC_GROUP_LEN); 3446 3447 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 3448 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3449 } 3450 3451 #define H2C_MCC_REQ_TSF_LEN 4 3452 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 3453 const struct rtw89_fw_mcc_tsf_req *req, 3454 struct rtw89_mac_mcc_tsf_rpt *rpt) 3455 { 3456 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3457 struct rtw89_mac_mcc_tsf_rpt *tmp; 3458 struct sk_buff *skb; 3459 unsigned int cond; 3460 int ret; 3461 3462 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 3463 if (!skb) { 3464 rtw89_err(rtwdev, 3465 "failed to alloc skb for mcc req tsf\n"); 3466 return -ENOMEM; 3467 } 3468 3469 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 3470 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 3471 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 3472 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 3473 3474 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3475 H2C_CAT_MAC, 3476 H2C_CL_MCC, 3477 H2C_FUNC_MCC_REQ_TSF, 0, 0, 3478 H2C_MCC_REQ_TSF_LEN); 3479 3480 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 3481 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3482 if (ret) 3483 return ret; 3484 3485 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 3486 *rpt = *tmp; 3487 3488 return 0; 3489 } 3490 3491 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 3492 int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid, 3493 u8 *bitmap) 3494 { 3495 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3496 struct sk_buff *skb; 3497 unsigned int cond; 3498 u8 map_len; 3499 u8 h2c_len; 3500 3501 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 3502 map_len = RTW89_MAX_MAC_ID_NUM / 8; 3503 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 3504 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 3505 if (!skb) { 3506 rtw89_err(rtwdev, 3507 "failed to alloc skb for mcc macid bitmap\n"); 3508 return -ENOMEM; 3509 } 3510 3511 skb_put(skb, h2c_len); 3512 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 3513 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 3514 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 3515 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 3516 3517 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3518 H2C_CAT_MAC, 3519 H2C_CL_MCC, 3520 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 3521 h2c_len); 3522 3523 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 3524 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3525 } 3526 3527 #define H2C_MCC_SYNC_LEN 4 3528 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 3529 u8 target, u8 offset) 3530 { 3531 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3532 struct sk_buff 
*skb; 3533 unsigned int cond; 3534 3535 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 3536 if (!skb) { 3537 rtw89_err(rtwdev, 3538 "failed to alloc skb for mcc sync\n"); 3539 return -ENOMEM; 3540 } 3541 3542 skb_put(skb, H2C_MCC_SYNC_LEN); 3543 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 3544 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 3545 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 3546 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 3547 3548 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3549 H2C_CAT_MAC, 3550 H2C_CL_MCC, 3551 H2C_FUNC_MCC_SYNC, 0, 0, 3552 H2C_MCC_SYNC_LEN); 3553 3554 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 3555 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3556 } 3557 3558 #define H2C_MCC_SET_DURATION_LEN 20 3559 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 3560 const struct rtw89_fw_mcc_duration *p) 3561 { 3562 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3563 struct sk_buff *skb; 3564 unsigned int cond; 3565 3566 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 3567 if (!skb) { 3568 rtw89_err(rtwdev, 3569 "failed to alloc skb for mcc set duration\n"); 3570 return -ENOMEM; 3571 } 3572 3573 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 3574 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 3575 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 3576 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 3577 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 3578 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 3579 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 3580 p->start_tsf_low); 3581 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 3582 p->start_tsf_high); 3583 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 3584 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 3585 3586 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3587 H2C_CAT_MAC, 3588 H2C_CL_MCC, 3589 H2C_FUNC_MCC_SET_DURATION, 0, 0, 3590 H2C_MCC_SET_DURATION_LEN); 3591 3592 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 3593 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3594 } 3595