// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "util.h"

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
			       struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info;
	const u8 *fw_end = fw + len;
	const u8 *fwdynhdr;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = GET_FW_HDR_SEC_NUM(fw);
	base_hdr_len = RTW89_FW_HDR_SIZE +
		       info->section_num * RTW89_FW_SECTION_HDR_SIZE;
	info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw);

	if (info->dynamic_hdr_en) {
		info->hdr_len = GET_FW_HDR_LEN(fw);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = fw + base_hdr_len;
		if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	fw += RTW89_FW_HDR_SIZE;
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc = GET_FWSECTION_HDR_MSSC(fw);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len =
GET_FWSECTION_HDR_SEC_SIZE(fw); 136 if (GET_FWSECTION_HDR_CHECKSUM(fw)) 137 section_info->len += FWDL_SECTION_CHKSUM_LEN; 138 section_info->redl = GET_FWSECTION_HDR_REDL(fw); 139 section_info->dladdr = 140 GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff; 141 section_info->addr = bin; 142 bin += section_info->len; 143 fw += RTW89_FW_SECTION_HDR_SIZE; 144 section_info++; 145 } 146 147 if (fw_end != bin + mssc_len) { 148 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 149 return -EINVAL; 150 } 151 152 return 0; 153 } 154 155 static 156 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 157 struct rtw89_fw_suit *fw_suit, bool nowarn) 158 { 159 struct rtw89_fw_info *fw_info = &rtwdev->fw; 160 const struct firmware *firmware = fw_info->req.firmware; 161 const u8 *mfw = firmware->data; 162 u32 mfw_len = firmware->size; 163 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 164 const struct rtw89_mfw_info *mfw_info; 165 int i; 166 167 if (mfw_hdr->sig != RTW89_MFW_SIG) { 168 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 169 /* legacy firmware support normal type only */ 170 if (type != RTW89_FW_NORMAL) 171 return -EINVAL; 172 fw_suit->data = mfw; 173 fw_suit->size = mfw_len; 174 return 0; 175 } 176 177 for (i = 0; i < mfw_hdr->fw_nr; i++) { 178 mfw_info = &mfw_hdr->info[i]; 179 if (mfw_info->cv != rtwdev->hal.cv || 180 mfw_info->type != type || 181 mfw_info->mp) 182 continue; 183 184 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 185 fw_suit->size = le32_to_cpu(mfw_info->size); 186 return 0; 187 } 188 189 if (!nowarn) 190 rtw89_err(rtwdev, "no suitable firmware found\n"); 191 return -ENOENT; 192 } 193 194 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 195 enum rtw89_fw_type type, 196 struct rtw89_fw_suit *fw_suit) 197 { 198 const u8 *hdr = fw_suit->data; 199 200 fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr); 201 fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr); 202 fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr); 203 fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr); 204 fw_suit->build_year = GET_FW_HDR_YEAR(hdr); 205 fw_suit->build_mon = GET_FW_HDR_MONTH(hdr); 206 fw_suit->build_date = GET_FW_HDR_DATE(hdr); 207 fw_suit->build_hour = GET_FW_HDR_HOUR(hdr); 208 fw_suit->build_min = GET_FW_HDR_MIN(hdr); 209 fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr); 210 211 rtw89_info(rtwdev, 212 "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n", 213 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 214 fw_suit->sub_idex, fw_suit->cmd_ver, type); 215 } 216 217 static 218 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 219 bool nowarn) 220 { 221 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 222 int ret; 223 224 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 225 if (ret) 226 return ret; 227 228 rtw89_fw_update_ver(rtwdev, type, fw_suit); 229 230 return 0; 231 } 232 233 #define __DEF_FW_FEAT_COND(__cond, __op) \ 234 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 235 { \ 236 return suit_ver_code __op comp_ver_code; \ 237 } 238 239 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 240 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 241 __DEF_FW_FEAT_COND(lt, <); /* less than */ 242 243 struct __fw_feat_cfg { 244 enum rtw89_core_chip_id chip_id; 245 enum rtw89_fw_feature feature; 246 u32 ver_code; 247 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 248 }; 249 250 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 251 { \ 252 
.chip_id = _chip, \ 253 .feature = RTW89_FW_FEATURE_ ## _feat, \ 254 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 255 .cond = __fw_feat_cond_ ## _cond, \ 256 } 257 258 static const struct __fw_feat_cfg fw_feat_tbl[] = { 259 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 260 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 261 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 262 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 263 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 264 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 265 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 266 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 267 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 268 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 269 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 270 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 271 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 272 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 273 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 274 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 275 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 276 }; 277 278 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 279 const struct rtw89_chip_info *chip, 280 u32 ver_code) 281 { 282 int i; 283 284 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 285 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 286 287 if (chip->chip_id != ent->chip_id) 288 continue; 289 290 if (ent->cond(ver_code, ent->ver_code)) 291 RTW89_SET_FW_FEATURE(ent->feature, fw); 292 } 293 } 294 295 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 296 { 297 const struct rtw89_chip_info *chip = rtwdev->chip; 298 const struct rtw89_fw_suit *fw_suit; 299 u32 suit_ver_code; 300 301 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 302 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 303 304 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 305 } 306 307 const struct firmware * 308 rtw89_early_fw_feature_recognize(struct device *device, 309 const struct rtw89_chip_info *chip, 310 struct rtw89_fw_info *early_fw, 311 int *used_fw_format) 312 { 313 union rtw89_compat_fw_hdr buf = {}; 314 const struct firmware *firmware; 315 bool full_req = false; 316 char fw_name[64]; 317 int fw_format; 318 u32 ver_code; 319 int ret; 320 321 /* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will 322 * be denied (-EPERM). Then, we don't get right firmware things as 323 * expected. So, in this case, we have to request full firmware here. 
324 */ 325 if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE)) 326 full_req = true; 327 328 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 329 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 330 chip->fw_basename, fw_format); 331 332 if (full_req) 333 ret = request_firmware(&firmware, fw_name, device); 334 else 335 ret = request_partial_firmware_into_buf(&firmware, fw_name, 336 device, &buf, sizeof(buf), 337 0); 338 if (!ret) { 339 dev_info(device, "loaded firmware %s\n", fw_name); 340 *used_fw_format = fw_format; 341 break; 342 } 343 } 344 345 if (ret) { 346 dev_err(device, "failed to early request firmware: %d\n", ret); 347 return NULL; 348 } 349 350 if (full_req) 351 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 352 else 353 ver_code = rtw89_compat_fw_hdr_ver_code(&buf); 354 355 if (!ver_code) 356 goto out; 357 358 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 359 360 out: 361 if (full_req) 362 return firmware; 363 364 release_firmware(firmware); 365 return NULL; 366 } 367 368 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 369 { 370 const struct rtw89_chip_info *chip = rtwdev->chip; 371 int ret; 372 373 if (chip->try_ce_fw) { 374 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 375 if (!ret) 376 goto normal_done; 377 } 378 379 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 380 if (ret) 381 return ret; 382 383 normal_done: 384 /* It still works if wowlan firmware isn't existing. */ 385 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 386 387 rtw89_fw_recognize_features(rtwdev); 388 389 rtw89_coex_recognize_ver(rtwdev); 390 391 return 0; 392 } 393 394 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 395 u8 type, u8 cat, u8 class, u8 func, 396 bool rack, bool dack, u32 len) 397 { 398 struct fwcmd_hdr *hdr; 399 400 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 401 402 if (!(rtwdev->fw.h2c_seq % 4)) 403 rack = true; 404 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 405 FIELD_PREP(H2C_HDR_CAT, cat) | 406 FIELD_PREP(H2C_HDR_CLASS, class) | 407 FIELD_PREP(H2C_HDR_FUNC, func) | 408 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 409 410 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 411 len + H2C_HEADER_LEN) | 412 (rack ? H2C_HDR_REC_ACK : 0) | 413 (dack ? 
H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	/* tell the WCPU how large each following section chunk will be */
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	u8 val;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	/* send the section payload in FWDL_SECTION_PER_PKT_LEN sized chunks */
	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	/* wait a bit before polling the FW-ready status */
	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32
val32; 561 u16 index; 562 563 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 564 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 565 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 566 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 567 568 for (index = 0; index < 15; index++) { 569 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL); 570 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 571 fsleep(10); 572 } 573 } 574 575 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 576 { 577 u32 val32; 578 u16 val16; 579 580 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 581 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 582 583 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); 584 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); 585 586 rtw89_fw_prog_cnt_dump(rtwdev); 587 } 588 589 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 590 { 591 struct rtw89_fw_info *fw_info = &rtwdev->fw; 592 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 593 struct rtw89_fw_bin_info info; 594 const u8 *fw = fw_suit->data; 595 u32 len = fw_suit->size; 596 u8 val; 597 int ret; 598 599 rtw89_mac_disable_cpu(rtwdev); 600 ret = rtw89_mac_enable_cpu(rtwdev, 0, true); 601 if (ret) 602 return ret; 603 604 if (!fw || !len) { 605 rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); 606 return -ENOENT; 607 } 608 609 ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); 610 if (ret) { 611 rtw89_err(rtwdev, "parse fw header fail\n"); 612 goto fwdl_err; 613 } 614 615 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, 616 1, FWDL_WAIT_CNT, false, 617 rtwdev, R_AX_WCPU_FW_CTRL); 618 if (ret) { 619 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 620 goto fwdl_err; 621 } 622 623 ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len); 624 if (ret) { 625 ret = -EBUSY; 626 goto fwdl_err; 627 } 628 629 ret = rtw89_fw_download_main(rtwdev, fw, &info); 630 if (ret) { 631 ret = -EBUSY; 632 goto fwdl_err; 633 } 634 635 fw_info->h2c_seq = 0; 636 fw_info->rec_seq = 0; 637 fw_info->h2c_counter = 0; 638 fw_info->c2h_counter = 0; 639 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 640 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 641 642 return ret; 643 644 fwdl_err: 645 rtw89_fw_dl_fail_dump(rtwdev); 646 return ret; 647 } 648 649 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 650 { 651 struct rtw89_fw_info *fw = &rtwdev->fw; 652 653 wait_for_completion(&fw->req.completion); 654 if (!fw->req.firmware) 655 return -EINVAL; 656 657 return 0; 658 } 659 660 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 661 struct rtw89_fw_req_info *req, 662 const char *fw_name, bool nowarn) 663 { 664 int ret; 665 666 if (req->firmware) { 667 rtw89_debug(rtwdev, RTW89_DBG_FW, 668 "full firmware has been early requested\n"); 669 complete_all(&req->completion); 670 return 0; 671 } 672 673 if (nowarn) 674 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 675 else 676 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 677 678 complete_all(&req->completion); 679 680 return ret; 681 } 682 683 void rtw89_load_firmware_work(struct work_struct *work) 684 { 685 struct rtw89_dev *rtwdev = 686 container_of(work, struct rtw89_dev, load_firmware_work); 687 const struct rtw89_chip_info *chip = rtwdev->chip; 688 char fw_name[64]; 689 690 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 691 chip->fw_basename, rtwdev->fw.fw_format); 692 693 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 694 
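	/* the request result is not propagated here; callers go through
	 * rtw89_wait_firmware_completion(), which treats a NULL
	 * req.firmware as the failure signal once the completion fires.
	 */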
} 695 696 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 697 { 698 struct rtw89_fw_info *fw = &rtwdev->fw; 699 700 cancel_work_sync(&rtwdev->load_firmware_work); 701 702 if (fw->req.firmware) { 703 release_firmware(fw->req.firmware); 704 705 /* assign NULL back in case rtw89_free_ieee80211_hw() 706 * try to release the same one again. 707 */ 708 fw->req.firmware = NULL; 709 } 710 } 711 712 #define H2C_CAM_LEN 60 713 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 714 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 715 { 716 struct sk_buff *skb; 717 int ret; 718 719 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 720 if (!skb) { 721 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 722 return -ENOMEM; 723 } 724 skb_put(skb, H2C_CAM_LEN); 725 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 726 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data); 727 728 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 729 H2C_CAT_MAC, 730 H2C_CL_MAC_ADDR_CAM_UPDATE, 731 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 732 H2C_CAM_LEN); 733 734 ret = rtw89_h2c_tx(rtwdev, skb, false); 735 if (ret) { 736 rtw89_err(rtwdev, "failed to send h2c\n"); 737 goto fail; 738 } 739 740 return 0; 741 fail: 742 dev_kfree_skb_any(skb); 743 744 return ret; 745 } 746 747 #define H2C_DCTL_SEC_CAM_LEN 68 748 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 749 struct rtw89_vif *rtwvif, 750 struct rtw89_sta *rtwsta) 751 { 752 struct sk_buff *skb; 753 int ret; 754 755 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); 756 if (!skb) { 757 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 758 return -ENOMEM; 759 } 760 skb_put(skb, H2C_DCTL_SEC_CAM_LEN); 761 762 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); 763 764 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 765 H2C_CAT_MAC, 766 H2C_CL_MAC_FR_EXCHG, 767 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 768 H2C_DCTL_SEC_CAM_LEN); 769 770 ret = rtw89_h2c_tx(rtwdev, skb, false); 771 if (ret) { 772 rtw89_err(rtwdev, "failed to send h2c\n"); 773 goto fail; 774 } 775 776 return 0; 777 fail: 778 dev_kfree_skb_any(skb); 779 780 return ret; 781 } 782 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 783 784 #define H2C_BA_CAM_LEN 8 785 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 786 bool valid, struct ieee80211_ampdu_params *params) 787 { 788 const struct rtw89_chip_info *chip = rtwdev->chip; 789 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 790 u8 macid = rtwsta->mac_id; 791 struct sk_buff *skb; 792 u8 entry_idx; 793 int ret; 794 795 ret = valid ? 796 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 797 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 798 if (ret) { 799 /* it still works even if we don't have static BA CAM, because 800 * hardware can create dynamic BA CAM automatically. 801 */ 802 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 803 "failed to %s entry tid=%d for h2c ba cam\n", 804 valid ? 
"alloc" : "free", params->tid); 805 return 0; 806 } 807 808 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 809 if (!skb) { 810 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 811 return -ENOMEM; 812 } 813 skb_put(skb, H2C_BA_CAM_LEN); 814 SET_BA_CAM_MACID(skb->data, macid); 815 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 816 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 817 else 818 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx); 819 if (!valid) 820 goto end; 821 SET_BA_CAM_VALID(skb->data, valid); 822 SET_BA_CAM_TID(skb->data, params->tid); 823 if (params->buf_size > 64) 824 SET_BA_CAM_BMAP_SIZE(skb->data, 4); 825 else 826 SET_BA_CAM_BMAP_SIZE(skb->data, 0); 827 /* If init req is set, hw will set the ssn */ 828 SET_BA_CAM_INIT_REQ(skb->data, 1); 829 SET_BA_CAM_SSN(skb->data, params->ssn); 830 831 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 832 SET_BA_CAM_STD_EN(skb->data, 1); 833 SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx); 834 } 835 836 end: 837 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 838 H2C_CAT_MAC, 839 H2C_CL_BA_CAM, 840 H2C_FUNC_MAC_BA_CAM, 0, 1, 841 H2C_BA_CAM_LEN); 842 843 ret = rtw89_h2c_tx(rtwdev, skb, false); 844 if (ret) { 845 rtw89_err(rtwdev, "failed to send h2c\n"); 846 goto fail; 847 } 848 849 return 0; 850 fail: 851 dev_kfree_skb_any(skb); 852 853 return ret; 854 } 855 856 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 857 u8 entry_idx, u8 uid) 858 { 859 struct sk_buff *skb; 860 int ret; 861 862 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 863 if (!skb) { 864 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 865 return -ENOMEM; 866 } 867 skb_put(skb, H2C_BA_CAM_LEN); 868 869 SET_BA_CAM_VALID(skb->data, 1); 870 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 871 SET_BA_CAM_UID(skb->data, uid); 872 SET_BA_CAM_BAND(skb->data, 0); 873 SET_BA_CAM_STD_EN(skb->data, 0); 874 875 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 876 H2C_CAT_MAC, 877 H2C_CL_BA_CAM, 878 H2C_FUNC_MAC_BA_CAM, 0, 1, 879 H2C_BA_CAM_LEN); 880 881 ret = rtw89_h2c_tx(rtwdev, skb, false); 882 if (ret) { 883 rtw89_err(rtwdev, "failed to send h2c\n"); 884 goto fail; 885 } 886 887 return 0; 888 fail: 889 dev_kfree_skb_any(skb); 890 891 return ret; 892 } 893 894 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 895 { 896 const struct rtw89_chip_info *chip = rtwdev->chip; 897 u8 entry_idx = chip->bacam_num; 898 u8 uid = 0; 899 int i; 900 901 for (i = 0; i < chip->bacam_dynamic_num; i++) { 902 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 903 entry_idx++; 904 uid++; 905 } 906 } 907 908 #define H2C_LOG_CFG_LEN 12 909 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 910 { 911 struct sk_buff *skb; 912 u32 comp = enable ? 
BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 913 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; 914 int ret; 915 916 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 917 if (!skb) { 918 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 919 return -ENOMEM; 920 } 921 922 skb_put(skb, H2C_LOG_CFG_LEN); 923 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); 924 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 925 SET_LOG_CFG_COMP(skb->data, comp); 926 SET_LOG_CFG_COMP_EXT(skb->data, 0); 927 928 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 929 H2C_CAT_MAC, 930 H2C_CL_FW_INFO, 931 H2C_FUNC_LOG_CFG, 0, 0, 932 H2C_LOG_CFG_LEN); 933 934 ret = rtw89_h2c_tx(rtwdev, skb, false); 935 if (ret) { 936 rtw89_err(rtwdev, "failed to send h2c\n"); 937 goto fail; 938 } 939 940 return 0; 941 fail: 942 dev_kfree_skb_any(skb); 943 944 return ret; 945 } 946 947 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 948 struct rtw89_vif *rtwvif, 949 enum rtw89_fw_pkt_ofld_type type, 950 u8 *id) 951 { 952 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 953 struct rtw89_pktofld_info *info; 954 struct sk_buff *skb; 955 int ret; 956 957 info = kzalloc(sizeof(*info), GFP_KERNEL); 958 if (!info) 959 return -ENOMEM; 960 961 switch (type) { 962 case RTW89_PKT_OFLD_TYPE_PS_POLL: 963 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 964 break; 965 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 966 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 967 break; 968 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 969 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 970 break; 971 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 972 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 973 break; 974 default: 975 goto err; 976 } 977 978 if (!skb) 979 goto err; 980 981 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 982 kfree_skb(skb); 983 984 if (ret) 985 goto err; 986 987 list_add_tail(&info->list, &rtwvif->general_pkt_list); 988 *id = info->id; 989 return 0; 990 991 err: 992 kfree(info); 993 return -ENOMEM; 994 } 995 996 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 997 struct rtw89_vif *rtwvif, bool notify_fw) 998 { 999 struct list_head *pkt_list = &rtwvif->general_pkt_list; 1000 struct rtw89_pktofld_info *info, *tmp; 1001 1002 list_for_each_entry_safe(info, tmp, pkt_list, list) { 1003 if (notify_fw) 1004 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 1005 else 1006 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 1007 list_del(&info->list); 1008 kfree(info); 1009 } 1010 } 1011 1012 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 1013 { 1014 struct rtw89_vif *rtwvif; 1015 1016 rtw89_for_each_rtwvif(rtwdev, rtwvif) 1017 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw); 1018 } 1019 1020 #define H2C_GENERAL_PKT_LEN 6 1021 #define H2C_GENERAL_PKT_ID_UND 0xff 1022 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 1023 struct rtw89_vif *rtwvif, u8 macid) 1024 { 1025 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 1026 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 1027 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 1028 struct sk_buff *skb; 1029 int ret; 1030 1031 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1032 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 1033 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1034 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 1035 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 1036 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 1037 1038 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 1039 if (!skb) { 1040 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1041 return -ENOMEM; 1042 } 1043 skb_put(skb, H2C_GENERAL_PKT_LEN); 1044 SET_GENERAL_PKT_MACID(skb->data, macid); 1045 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 1046 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 1047 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 1048 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 1049 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 1050 1051 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1052 H2C_CAT_MAC, 1053 H2C_CL_FW_INFO, 1054 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 1055 H2C_GENERAL_PKT_LEN); 1056 1057 ret = rtw89_h2c_tx(rtwdev, skb, false); 1058 if (ret) { 1059 rtw89_err(rtwdev, "failed to send h2c\n"); 1060 goto fail; 1061 } 1062 1063 return 0; 1064 fail: 1065 dev_kfree_skb_any(skb); 1066 1067 return ret; 1068 } 1069 1070 #define H2C_LPS_PARM_LEN 8 1071 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 1072 struct rtw89_lps_parm *lps_param) 1073 { 1074 struct sk_buff *skb; 1075 int ret; 1076 1077 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 1078 if (!skb) { 1079 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1080 return -ENOMEM; 1081 } 1082 skb_put(skb, H2C_LPS_PARM_LEN); 1083 1084 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 1085 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 1086 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 1087 SET_LPS_PARM_RLBM(skb->data, 1); 1088 SET_LPS_PARM_SMARTPS(skb->data, 1); 1089 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 1090 SET_LPS_PARM_VOUAPSD(skb->data, 0); 1091 SET_LPS_PARM_VIUAPSD(skb->data, 0); 1092 SET_LPS_PARM_BEUAPSD(skb->data, 0); 1093 SET_LPS_PARM_BKUAPSD(skb->data, 0); 1094 1095 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1096 H2C_CAT_MAC, 1097 H2C_CL_MAC_PS, 1098 H2C_FUNC_MAC_LPS_PARM, 0, 1, 1099 H2C_LPS_PARM_LEN); 1100 1101 ret = rtw89_h2c_tx(rtwdev, skb, false); 1102 if (ret) { 1103 rtw89_err(rtwdev, "failed to send h2c\n"); 1104 goto fail; 1105 } 1106 1107 return 0; 1108 fail: 1109 dev_kfree_skb_any(skb); 1110 1111 return ret; 1112 } 1113 1114 #define H2C_P2P_ACT_LEN 20 1115 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 1116 struct ieee80211_p2p_noa_desc *desc, 1117 u8 act, u8 noa_id) 1118 { 1119 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1120 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 1121 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 1122 struct sk_buff *skb; 1123 u8 *cmd; 1124 int ret; 1125 1126 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 1127 if (!skb) { 1128 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1129 return -ENOMEM; 1130 } 1131 skb_put(skb, H2C_P2P_ACT_LEN); 1132 cmd = skb->data; 1133 1134 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 1135 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 1136 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 1137 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 1138 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 1139 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 1140 if (desc) { 1141 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 1142 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 1143 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 1144 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 1145 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 1146 } 1147 1148 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1149 H2C_CAT_MAC, 
H2C_CL_MAC_PS, 1150 H2C_FUNC_P2P_ACT, 0, 0, 1151 H2C_P2P_ACT_LEN); 1152 1153 ret = rtw89_h2c_tx(rtwdev, skb, false); 1154 if (ret) { 1155 rtw89_err(rtwdev, "failed to send h2c\n"); 1156 goto fail; 1157 } 1158 1159 return 0; 1160 fail: 1161 dev_kfree_skb_any(skb); 1162 1163 return ret; 1164 } 1165 1166 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 1167 struct sk_buff *skb) 1168 { 1169 const struct rtw89_chip_info *chip = rtwdev->chip; 1170 struct rtw89_hal *hal = &rtwdev->hal; 1171 u8 ntx_path; 1172 u8 map_b; 1173 1174 if (chip->rf_path_num == 1) { 1175 ntx_path = RF_A; 1176 map_b = 0; 1177 } else { 1178 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 1179 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 1180 } 1181 1182 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 1183 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 1184 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 1185 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 1186 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 1187 } 1188 1189 #define H2C_CMC_TBL_LEN 68 1190 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 1191 struct rtw89_vif *rtwvif) 1192 { 1193 const struct rtw89_chip_info *chip = rtwdev->chip; 1194 struct sk_buff *skb; 1195 u8 macid = rtwvif->mac_id; 1196 int ret; 1197 1198 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1199 if (!skb) { 1200 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1201 return -ENOMEM; 1202 } 1203 skb_put(skb, H2C_CMC_TBL_LEN); 1204 SET_CTRL_INFO_MACID(skb->data, macid); 1205 SET_CTRL_INFO_OPERATION(skb->data, 1); 1206 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1207 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 1208 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1209 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 1210 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 1211 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 1212 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 1213 } 1214 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 1215 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 1216 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1217 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1218 1219 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1220 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1221 chip->h2c_cctl_func_id, 0, 1, 1222 H2C_CMC_TBL_LEN); 1223 1224 ret = rtw89_h2c_tx(rtwdev, skb, false); 1225 if (ret) { 1226 rtw89_err(rtwdev, "failed to send h2c\n"); 1227 goto fail; 1228 } 1229 1230 return 0; 1231 fail: 1232 dev_kfree_skb_any(skb); 1233 1234 return ret; 1235 } 1236 1237 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 1238 struct ieee80211_sta *sta, u8 *pads) 1239 { 1240 bool ppe_th; 1241 u8 ppe16, ppe8; 1242 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 1243 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 1244 u8 ru_bitmap; 1245 u8 n, idx, sh; 1246 u16 ppe; 1247 int i; 1248 1249 if (!sta->deflink.he_cap.has_he) 1250 return; 1251 1252 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 1253 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 1254 if (!ppe_th) { 1255 u8 pad; 1256 1257 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 1258 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 1259 1260 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 1261 pads[i] = pad; 1262 1263 return; 1264 } 1265 1266 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 1267 n = hweight8(ru_bitmap); 1268 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 1269 1270 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 1271 if (!(ru_bitmap & BIT(i))) { 1272 pads[i] = 1; 1273 continue; 1274 } 1275 1276 idx 
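		/* n is the running bit offset of this RU's PPET pair within
		 * ppe_thres[]: roughly 7 header bits plus two 3-bit PPET
		 * fields per RU per spatial stream. idx/sh below turn that
		 * bit offset into a byte index plus an intra-byte shift
		 * before extracting PPET16 and PPET8.
		 */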
= n >> 3; 1277 sh = n & 7; 1278 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 1279 1280 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 1281 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1282 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1283 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1284 1285 if (ppe16 != 7 && ppe8 == 7) 1286 pads[i] = 2; 1287 else if (ppe8 != 7) 1288 pads[i] = 1; 1289 else 1290 pads[i] = 0; 1291 } 1292 } 1293 1294 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 1295 struct ieee80211_vif *vif, 1296 struct ieee80211_sta *sta) 1297 { 1298 const struct rtw89_chip_info *chip = rtwdev->chip; 1299 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1300 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1301 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1302 struct sk_buff *skb; 1303 u8 pads[RTW89_PPE_BW_NUM]; 1304 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1305 u16 lowest_rate; 1306 int ret; 1307 1308 memset(pads, 0, sizeof(pads)); 1309 if (sta) 1310 __get_sta_he_pkt_padding(rtwdev, sta, pads); 1311 1312 if (vif->p2p) 1313 lowest_rate = RTW89_HW_RATE_OFDM6; 1314 else if (chan->band_type == RTW89_BAND_2G) 1315 lowest_rate = RTW89_HW_RATE_CCK1; 1316 else 1317 lowest_rate = RTW89_HW_RATE_OFDM6; 1318 1319 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1320 if (!skb) { 1321 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1322 return -ENOMEM; 1323 } 1324 skb_put(skb, H2C_CMC_TBL_LEN); 1325 SET_CTRL_INFO_MACID(skb->data, mac_id); 1326 SET_CTRL_INFO_OPERATION(skb->data, 1); 1327 SET_CMC_TBL_DISRTSFB(skb->data, 1); 1328 SET_CMC_TBL_DISDATAFB(skb->data, 1); 1329 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 1330 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 1331 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 1332 if (vif->type == NL80211_IFTYPE_STATION) 1333 SET_CMC_TBL_ULDL(skb->data, 1); 1334 else 1335 SET_CMC_TBL_ULDL(skb->data, 0); 1336 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 1337 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 1338 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1339 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1340 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1341 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1342 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1343 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1344 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1345 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1346 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1347 } 1348 if (sta) 1349 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 1350 sta->deflink.he_cap.has_he); 1351 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1352 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1353 1354 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1355 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1356 chip->h2c_cctl_func_id, 0, 1, 1357 H2C_CMC_TBL_LEN); 1358 1359 ret = rtw89_h2c_tx(rtwdev, skb, false); 1360 if (ret) { 1361 rtw89_err(rtwdev, "failed to send h2c\n"); 1362 goto fail; 1363 } 1364 1365 return 0; 1366 fail: 1367 dev_kfree_skb_any(skb); 1368 1369 return ret; 1370 } 1371 1372 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1373 struct rtw89_sta *rtwsta) 1374 { 1375 const 
struct rtw89_chip_info *chip = rtwdev->chip; 1376 struct sk_buff *skb; 1377 int ret; 1378 1379 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1380 if (!skb) { 1381 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1382 return -ENOMEM; 1383 } 1384 skb_put(skb, H2C_CMC_TBL_LEN); 1385 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1386 SET_CTRL_INFO_OPERATION(skb->data, 1); 1387 if (rtwsta->cctl_tx_time) { 1388 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1389 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1390 } 1391 if (rtwsta->cctl_tx_retry_limit) { 1392 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1393 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1394 } 1395 1396 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1397 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1398 chip->h2c_cctl_func_id, 0, 1, 1399 H2C_CMC_TBL_LEN); 1400 1401 ret = rtw89_h2c_tx(rtwdev, skb, false); 1402 if (ret) { 1403 rtw89_err(rtwdev, "failed to send h2c\n"); 1404 goto fail; 1405 } 1406 1407 return 0; 1408 fail: 1409 dev_kfree_skb_any(skb); 1410 1411 return ret; 1412 } 1413 1414 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1415 struct rtw89_sta *rtwsta) 1416 { 1417 const struct rtw89_chip_info *chip = rtwdev->chip; 1418 struct sk_buff *skb; 1419 int ret; 1420 1421 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 1422 return 0; 1423 1424 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1425 if (!skb) { 1426 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1427 return -ENOMEM; 1428 } 1429 skb_put(skb, H2C_CMC_TBL_LEN); 1430 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1431 SET_CTRL_INFO_OPERATION(skb->data, 1); 1432 1433 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1434 1435 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1436 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1437 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 1438 H2C_CMC_TBL_LEN); 1439 1440 ret = rtw89_h2c_tx(rtwdev, skb, false); 1441 if (ret) { 1442 rtw89_err(rtwdev, "failed to send h2c\n"); 1443 goto fail; 1444 } 1445 1446 return 0; 1447 fail: 1448 dev_kfree_skb_any(skb); 1449 1450 return ret; 1451 } 1452 1453 #define H2C_BCN_BASE_LEN 12 1454 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 1455 struct rtw89_vif *rtwvif) 1456 { 1457 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 1458 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1459 struct sk_buff *skb; 1460 struct sk_buff *skb_beacon; 1461 u16 tim_offset; 1462 int bcn_total_len; 1463 u16 beacon_rate; 1464 int ret; 1465 1466 if (vif->p2p) 1467 beacon_rate = RTW89_HW_RATE_OFDM6; 1468 else if (chan->band_type == RTW89_BAND_2G) 1469 beacon_rate = RTW89_HW_RATE_CCK1; 1470 else 1471 beacon_rate = RTW89_HW_RATE_OFDM6; 1472 1473 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 1474 NULL, 0); 1475 if (!skb_beacon) { 1476 rtw89_err(rtwdev, "failed to get beacon skb\n"); 1477 return -ENOMEM; 1478 } 1479 1480 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 1481 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 1482 if (!skb) { 1483 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1484 dev_kfree_skb_any(skb_beacon); 1485 return -ENOMEM; 1486 } 1487 skb_put(skb, H2C_BCN_BASE_LEN); 1488 1489 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 1490 SET_BCN_UPD_MBSSID(skb->data, 0); 1491 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 1492 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 1493 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 1494 SET_BCN_UPD_SSN_SEL(skb->data, 
RTW89_MGMT_HW_SSN_SEL); 1495 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 1496 SET_BCN_UPD_RATE(skb->data, beacon_rate); 1497 1498 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 1499 dev_kfree_skb_any(skb_beacon); 1500 1501 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1502 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1503 H2C_FUNC_MAC_BCN_UPD, 0, 1, 1504 bcn_total_len); 1505 1506 ret = rtw89_h2c_tx(rtwdev, skb, false); 1507 if (ret) { 1508 rtw89_err(rtwdev, "failed to send h2c\n"); 1509 dev_kfree_skb_any(skb); 1510 return ret; 1511 } 1512 1513 return 0; 1514 } 1515 1516 #define H2C_ROLE_MAINTAIN_LEN 4 1517 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 1518 struct rtw89_vif *rtwvif, 1519 struct rtw89_sta *rtwsta, 1520 enum rtw89_upd_mode upd_mode) 1521 { 1522 struct sk_buff *skb; 1523 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1524 u8 self_role; 1525 int ret; 1526 1527 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 1528 if (rtwsta) 1529 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1530 else 1531 self_role = rtwvif->self_role; 1532 } else { 1533 self_role = rtwvif->self_role; 1534 } 1535 1536 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 1537 if (!skb) { 1538 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1539 return -ENOMEM; 1540 } 1541 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 1542 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 1543 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 1544 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 1545 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1546 1547 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1548 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1549 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 1550 H2C_ROLE_MAINTAIN_LEN); 1551 1552 ret = rtw89_h2c_tx(rtwdev, skb, false); 1553 if (ret) { 1554 rtw89_err(rtwdev, "failed to send h2c\n"); 1555 goto fail; 1556 } 1557 1558 return 0; 1559 fail: 1560 dev_kfree_skb_any(skb); 1561 1562 return ret; 1563 } 1564 1565 #define H2C_JOIN_INFO_LEN 4 1566 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1567 struct rtw89_sta *rtwsta, bool dis_conn) 1568 { 1569 struct sk_buff *skb; 1570 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1571 u8 self_role = rtwvif->self_role; 1572 u8 net_type = rtwvif->net_type; 1573 int ret; 1574 1575 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 1576 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1577 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_JOIN_INFO_LEN);
	SET_JOININFO_MACID(skb->data, mac_id);
	SET_JOININFO_OP(skb->data, dis_conn);
	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
	SET_JOININFO_ISHESTA(skb->data, 0);
	SET_JOININFO_DLBW(skb->data, 0);
	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
	SET_JOININFO_DL_T_PE(skb->data, 0);
	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
	SET_JOININFO_NET_TYPE(skb->data, net_type);
	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
	SET_JOININFO_SELF_ROLE(skb->data, self_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      H2C_JOIN_INFO_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_grp h2c = {{0}};
	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
		return -ENOMEM;
	}
	/* macids are grouped 32 per bitmap word; flag only this macid's bit */
	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
	if (pause)
		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
	skb_put_data(skb, &h2c, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_EDCA_LEN 12
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			  u8 ac, u32 val)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_EDCA_LEN);
	RTW89_SET_EDCA_SEL(skb->data, 0);
	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
	RTW89_SET_EDCA_WMM(skb->data, 0);
	RTW89_SET_EDCA_AC(skb->data, ac);
	RTW89_SET_EDCA_PARAM(skb->data, val);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_USR_EDCA, 0, 1,
			      H2C_EDCA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_TSF32_TOGL_LEN 4
int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			      bool en)
{
	struct sk_buff *skb;
	u16 early_us = en ?
2000 : 0; 1697 u8 *cmd; 1698 int ret; 1699 1700 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 1701 if (!skb) { 1702 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1703 return -ENOMEM; 1704 } 1705 skb_put(skb, H2C_TSF32_TOGL_LEN); 1706 cmd = skb->data; 1707 1708 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 1709 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 1710 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 1711 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 1712 1713 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1714 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1715 H2C_FUNC_TSF32_TOGL, 0, 0, 1716 H2C_TSF32_TOGL_LEN); 1717 1718 ret = rtw89_h2c_tx(rtwdev, skb, false); 1719 if (ret) { 1720 rtw89_err(rtwdev, "failed to send h2c\n"); 1721 goto fail; 1722 } 1723 1724 return 0; 1725 fail: 1726 dev_kfree_skb_any(skb); 1727 1728 return ret; 1729 } 1730 1731 #define H2C_OFLD_CFG_LEN 8 1732 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 1733 { 1734 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 1735 struct sk_buff *skb; 1736 int ret; 1737 1738 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 1739 if (!skb) { 1740 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 1741 return -ENOMEM; 1742 } 1743 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 1744 1745 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1746 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1747 H2C_FUNC_OFLD_CFG, 0, 1, 1748 H2C_OFLD_CFG_LEN); 1749 1750 ret = rtw89_h2c_tx(rtwdev, skb, false); 1751 if (ret) { 1752 rtw89_err(rtwdev, "failed to send h2c\n"); 1753 goto fail; 1754 } 1755 1756 return 0; 1757 fail: 1758 dev_kfree_skb_any(skb); 1759 1760 return ret; 1761 } 1762 1763 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 1764 struct ieee80211_vif *vif, 1765 bool connect) 1766 { 1767 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 1768 struct ieee80211_bss_conf *bss_conf = vif ? 
&vif->bss_conf : NULL; 1769 struct rtw89_h2c_bcnfltr *h2c; 1770 u32 len = sizeof(*h2c); 1771 struct sk_buff *skb; 1772 int ret; 1773 1774 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 1775 return -EINVAL; 1776 1777 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 1778 return -EINVAL; 1779 1780 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1781 if (!skb) { 1782 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 1783 return -ENOMEM; 1784 } 1785 1786 skb_put(skb, len); 1787 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 1788 1789 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 1790 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 1791 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 1792 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 1793 RTW89_H2C_BCNFLTR_W0_MODE) | 1794 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 1795 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 1796 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI, 1797 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 1798 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 1799 1800 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1801 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1802 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 1803 1804 ret = rtw89_h2c_tx(rtwdev, skb, false); 1805 if (ret) { 1806 rtw89_err(rtwdev, "failed to send h2c\n"); 1807 goto fail; 1808 } 1809 1810 return 0; 1811 fail: 1812 dev_kfree_skb_any(skb); 1813 1814 return ret; 1815 } 1816 1817 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 1818 struct rtw89_rx_phy_ppdu *phy_ppdu) 1819 { 1820 struct rtw89_h2c_ofld_rssi *h2c; 1821 u32 len = sizeof(*h2c); 1822 struct sk_buff *skb; 1823 s8 rssi; 1824 int ret; 1825 1826 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 1827 return -EINVAL; 1828 1829 if (!phy_ppdu) 1830 return -EINVAL; 1831 1832 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1833 if (!skb) { 1834 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 1835 return -ENOMEM; 1836 } 1837 1838 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 1839 skb_put(skb, len); 1840 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 1841 1842 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 1843 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 1844 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 1845 1846 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1847 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1848 H2C_FUNC_OFLD_RSSI, 0, 1, len); 1849 1850 ret = rtw89_h2c_tx(rtwdev, skb, false); 1851 if (ret) { 1852 rtw89_err(rtwdev, "failed to send h2c\n"); 1853 goto fail; 1854 } 1855 1856 return 0; 1857 fail: 1858 dev_kfree_skb_any(skb); 1859 1860 return ret; 1861 } 1862 1863 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 1864 { 1865 struct rtw89_traffic_stats *stats = &rtwvif->stats; 1866 struct rtw89_h2c_ofld *h2c; 1867 u32 len = sizeof(*h2c); 1868 struct sk_buff *skb; 1869 int ret; 1870 1871 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 1872 return -EINVAL; 1873 1874 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1875 if (!skb) { 1876 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 1877 return -ENOMEM; 1878 } 1879 1880 skb_put(skb, len); 1881 h2c = (struct rtw89_h2c_ofld *)skb->data; 1882 1883 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 1884 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 1885 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 1886 1887 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1888 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1889 H2C_FUNC_OFLD_TP, 0, 1, len); 1890 1891 ret = rtw89_h2c_tx(rtwdev, skb, false); 1892 if (ret) { 1893 rtw89_err(rtwdev, "failed to send h2c\n"); 1894 goto fail; 1895 } 1896 1897 return 0; 1898 fail: 1899 dev_kfree_skb_any(skb); 1900 1901 return ret; 1902 } 1903 1904 #define H2C_RA_LEN 16 1905 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 1906 { 1907 struct sk_buff *skb; 1908 u8 *cmd; 1909 int ret; 1910 1911 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN); 1912 if (!skb) { 1913 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1914 return -ENOMEM; 1915 } 1916 skb_put(skb, H2C_RA_LEN); 1917 cmd = skb->data; 1918 rtw89_debug(rtwdev, RTW89_DBG_RA, 1919 "ra cmd msk: %llx ", ra->ra_mask); 1920 1921 RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); 1922 RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); 1923 RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); 1924 RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); 1925 RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); 1926 RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); 1927 RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); 1928 RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); 1929 RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap); 1930 RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); 1931 RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); 1932 RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); 1933 RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); 1934 RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); 1935 RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); 1936 RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); 1937 RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); 1938 RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); 1939 RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); 1940 RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en); 1941 RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf); 1942 1943 if (csi) { 1944 RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); 1945 RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); 1946 RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); 1947 RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); 1948 RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); 1949 RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); 1950 RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); 1951 RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); 1952 RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); 1953 } 1954 1955 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1956 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 1957 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 1958 H2C_RA_LEN); 1959 1960 ret = rtw89_h2c_tx(rtwdev, skb, false); 1961 if (ret) { 1962 rtw89_err(rtwdev, "failed to send h2c\n"); 1963 goto fail; 1964 } 1965 1966 return 0; 1967 fail: 1968 dev_kfree_skb_any(skb); 1969 1970 return ret; 1971 } 1972 1973 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 1974 { 1975 struct rtw89_btc *btc = &rtwdev->btc; 1976 struct rtw89_btc_dm *dm = &btc->dm; 1977 struct rtw89_btc_init_info *init_info = &dm->init_info; 1978 struct rtw89_btc_module *module = &init_info->module; 1979 struct rtw89_btc_ant_info *ant = &module->ant; 1980 struct rtw89_h2c_cxinit *h2c; 1981 u32 len = sizeof(*h2c); 1982 struct sk_buff *skb; 1983 int ret; 1984 1985 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1986 
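	/* the CXDRVINFO_INIT payload below is laid out via the packed
	 * struct rtw89_h2c_cxinit (driver-info header, then antenna,
	 * module and init-state fields), unlike the byte-offset setter
	 * macros used for the other CXDRVINFO messages.
	 */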
if (!skb) { 1987 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 1988 return -ENOMEM; 1989 } 1990 skb_put(skb, len); 1991 h2c = (struct rtw89_h2c_cxinit *)skb->data; 1992 1993 h2c->hdr.type = CXDRVINFO_INIT; 1994 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 1995 1996 h2c->ant_type = ant->type; 1997 h2c->ant_num = ant->num; 1998 h2c->ant_iso = ant->isolation; 1999 h2c->ant_info = 2000 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 2001 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 2002 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 2003 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 2004 2005 h2c->mod_rfe = module->rfe_type; 2006 h2c->mod_cv = module->cv; 2007 h2c->mod_info = 2008 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 2009 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 2010 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 2011 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 2012 h2c->mod_adie_kt = module->kt_ver_adie; 2013 h2c->wl_gch = init_info->wl_guard_ch; 2014 2015 h2c->info = 2016 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 2017 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 2018 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 2019 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 2020 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 2021 2022 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2023 H2C_CAT_OUTSRC, BTFC_SET, 2024 SET_DRV_INFO, 0, 0, 2025 len); 2026 2027 ret = rtw89_h2c_tx(rtwdev, skb, false); 2028 if (ret) { 2029 rtw89_err(rtwdev, "failed to send h2c\n"); 2030 goto fail; 2031 } 2032 2033 return 0; 2034 fail: 2035 dev_kfree_skb_any(skb); 2036 2037 return ret; 2038 } 2039 2040 #define PORT_DATA_OFFSET 4 2041 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 2042 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 2043 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 2044 2045 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 2046 { 2047 struct rtw89_btc *btc = &rtwdev->btc; 2048 const struct rtw89_btc_ver *ver = btc->ver; 2049 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2050 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 2051 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2052 struct rtw89_btc_wl_active_role *active = role_info->active_role; 2053 struct sk_buff *skb; 2054 u32 len; 2055 u8 offset = 0; 2056 u8 *cmd; 2057 int ret; 2058 int i; 2059 2060 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 2061 2062 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2063 if (!skb) { 2064 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2065 return -ENOMEM; 2066 } 2067 skb_put(skb, len); 2068 cmd = skb->data; 2069 2070 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2071 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2072 2073 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2074 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2075 2076 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2077 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2078 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2079 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2080 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2081 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2082 
RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2083 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2084 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2085 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2086 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2087 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2088 2089 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2090 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2091 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2092 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2093 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2094 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2095 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2096 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2097 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2098 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2099 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2100 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2101 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2102 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2103 } 2104 2105 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2106 H2C_CAT_OUTSRC, BTFC_SET, 2107 SET_DRV_INFO, 0, 0, 2108 len); 2109 2110 ret = rtw89_h2c_tx(rtwdev, skb, false); 2111 if (ret) { 2112 rtw89_err(rtwdev, "failed to send h2c\n"); 2113 goto fail; 2114 } 2115 2116 return 0; 2117 fail: 2118 dev_kfree_skb_any(skb); 2119 2120 return ret; 2121 } 2122 2123 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 2124 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2125 2126 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 2127 { 2128 struct rtw89_btc *btc = &rtwdev->btc; 2129 const struct rtw89_btc_ver *ver = btc->ver; 2130 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2131 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 2132 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2133 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 2134 struct sk_buff *skb; 2135 u32 len; 2136 u8 *cmd, offset; 2137 int ret; 2138 int i; 2139 2140 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 2141 2142 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2143 if (!skb) { 2144 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2145 return -ENOMEM; 2146 } 2147 skb_put(skb, len); 2148 cmd = skb->data; 2149 2150 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2151 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2152 2153 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2154 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2155 2156 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2157 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2158 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2159 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2160 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2161 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2162 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2163 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2164 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2165 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2166 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, 
bpos->p2p_go); 2167 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2168 2169 offset = PORT_DATA_OFFSET; 2170 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2171 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2172 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2173 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2174 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2175 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2176 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2177 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2178 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2179 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2180 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2181 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2182 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2183 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2184 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2185 } 2186 2187 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2188 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2189 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2190 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2191 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2192 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2193 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2194 2195 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2196 H2C_CAT_OUTSRC, BTFC_SET, 2197 SET_DRV_INFO, 0, 0, 2198 len); 2199 2200 ret = rtw89_h2c_tx(rtwdev, skb, false); 2201 if (ret) { 2202 rtw89_err(rtwdev, "failed to send h2c\n"); 2203 goto fail; 2204 } 2205 2206 return 0; 2207 fail: 2208 dev_kfree_skb_any(skb); 2209 2210 return ret; 2211 } 2212 2213 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 2214 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2215 2216 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev) 2217 { 2218 struct rtw89_btc *btc = &rtwdev->btc; 2219 const struct rtw89_btc_ver *ver = btc->ver; 2220 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2221 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 2222 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2223 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 2224 struct sk_buff *skb; 2225 u32 len; 2226 u8 *cmd, offset; 2227 int ret; 2228 int i; 2229 2230 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 2231 2232 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2233 if (!skb) { 2234 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2235 return -ENOMEM; 2236 } 2237 skb_put(skb, len); 2238 cmd = skb->data; 2239 2240 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2241 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2242 2243 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2244 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2245 2246 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2247 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2248 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2249 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2250 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2251 
RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2252 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2253 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2254 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2255 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2256 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2257 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2258 2259 offset = PORT_DATA_OFFSET; 2260 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2261 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 2262 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 2263 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 2264 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 2265 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 2266 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 2267 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 2268 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 2269 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 2270 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 2271 } 2272 2273 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2274 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2275 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2276 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2277 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2278 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2279 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2280 2281 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2282 H2C_CAT_OUTSRC, BTFC_SET, 2283 SET_DRV_INFO, 0, 0, 2284 len); 2285 2286 ret = rtw89_h2c_tx(rtwdev, skb, false); 2287 if (ret) { 2288 rtw89_err(rtwdev, "failed to send h2c\n"); 2289 goto fail; 2290 } 2291 2292 return 0; 2293 fail: 2294 dev_kfree_skb_any(skb); 2295 2296 return ret; 2297 } 2298 2299 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2300 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2301 { 2302 struct rtw89_btc *btc = &rtwdev->btc; 2303 const struct rtw89_btc_ver *ver = btc->ver; 2304 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2305 struct sk_buff *skb; 2306 u8 *cmd; 2307 int ret; 2308 2309 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 2310 if (!skb) { 2311 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2312 return -ENOMEM; 2313 } 2314 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 2315 cmd = skb->data; 2316 2317 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 2318 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 2319 2320 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 2321 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 2322 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 2323 if (ver->fcxctrl == 0) 2324 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 2325 2326 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2327 H2C_CAT_OUTSRC, BTFC_SET, 2328 SET_DRV_INFO, 0, 0, 2329 H2C_LEN_CXDRVINFO_CTRL); 2330 2331 ret = rtw89_h2c_tx(rtwdev, skb, false); 2332 if (ret) { 2333 rtw89_err(rtwdev, "failed to send h2c\n"); 2334 goto fail; 2335 } 2336 2337 return 0; 2338 fail: 2339 dev_kfree_skb_any(skb); 2340 2341 return ret; 2342 } 2343 2344 #define H2C_LEN_CXDRVINFO_TRX (28 + 
				H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
	RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
	RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
	RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
	RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
	RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
	RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
	RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
	RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
	RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
	RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
	RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
	RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
	RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
	RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
	RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
	RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_TRX);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_RFK);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_PKT_OFLD 4
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 *cmd;
int ret; 2453 2454 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 2455 if (!skb) { 2456 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2457 return -ENOMEM; 2458 } 2459 skb_put(skb, H2C_LEN_PKT_OFLD); 2460 cmd = skb->data; 2461 2462 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 2463 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 2464 2465 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2466 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2467 H2C_FUNC_PACKET_OFLD, 1, 1, 2468 H2C_LEN_PKT_OFLD); 2469 2470 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 2471 2472 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2473 if (ret < 0) { 2474 rtw89_debug(rtwdev, RTW89_DBG_FW, 2475 "failed to del pkt ofld: id %d, ret %d\n", 2476 id, ret); 2477 return ret; 2478 } 2479 2480 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 2481 return 0; 2482 } 2483 2484 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 2485 struct sk_buff *skb_ofld) 2486 { 2487 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2488 struct sk_buff *skb; 2489 unsigned int cond; 2490 u8 *cmd; 2491 u8 alloc_id; 2492 int ret; 2493 2494 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2495 RTW89_MAX_PKT_OFLD_NUM); 2496 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2497 return -ENOSPC; 2498 2499 *id = alloc_id; 2500 2501 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2502 if (!skb) { 2503 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2504 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2505 return -ENOMEM; 2506 } 2507 skb_put(skb, H2C_LEN_PKT_OFLD); 2508 cmd = skb->data; 2509 2510 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2511 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2512 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 2513 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 2514 2515 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2516 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2517 H2C_FUNC_PACKET_OFLD, 1, 1, 2518 H2C_LEN_PKT_OFLD + skb_ofld->len); 2519 2520 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 2521 2522 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2523 if (ret < 0) { 2524 rtw89_debug(rtwdev, RTW89_DBG_FW, 2525 "failed to add pkt ofld: id %d, ret %d\n", 2526 alloc_id, ret); 2527 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2528 return ret; 2529 } 2530 2531 return 0; 2532 } 2533 2534 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 2535 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 2536 struct list_head *chan_list) 2537 { 2538 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2539 struct rtw89_mac_chinfo *ch_info; 2540 struct sk_buff *skb; 2541 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 2542 unsigned int cond; 2543 u8 *cmd; 2544 int ret; 2545 2546 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 2547 if (!skb) { 2548 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 2549 return -ENOMEM; 2550 } 2551 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 2552 cmd = skb->data; 2553 2554 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 2555 /* in unit of 4 bytes */ 2556 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 2557 2558 list_for_each_entry(ch_info, chan_list, list) { 2559 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 2560 2561 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 2562 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, 
ch_info->dwell_time); 2563 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 2564 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 2565 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 2566 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 2567 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 2568 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 2569 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 2570 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 2571 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 2572 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 2573 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 2574 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 2575 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 2576 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 2577 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 2578 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 2579 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 2580 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 2581 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 2582 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 2583 } 2584 2585 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2586 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2587 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 2588 2589 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH); 2590 2591 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2592 if (ret) { 2593 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 2594 return ret; 2595 } 2596 2597 return 0; 2598 } 2599 2600 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 2601 struct rtw89_scan_option *option, 2602 struct rtw89_vif *rtwvif) 2603 { 2604 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2605 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 2606 struct rtw89_h2c_scanofld *h2c; 2607 u32 len = sizeof(*h2c); 2608 struct sk_buff *skb; 2609 unsigned int cond; 2610 int ret; 2611 2612 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2613 if (!skb) { 2614 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 2615 return -ENOMEM; 2616 } 2617 skb_put(skb, len); 2618 h2c = (struct rtw89_h2c_scanofld *)skb->data; 2619 2620 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 2621 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 2622 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 2623 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 2624 2625 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 2626 le32_encode_bits(option->target_ch_mode, 2627 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 2628 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 2629 RTW89_H2C_SCANOFLD_W1_START_MODE) | 2630 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 2631 2632 if (option->target_ch_mode) { 2633 h2c->w1 |= le32_encode_bits(op->band_width, 2634 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 2635 le32_encode_bits(op->primary_channel, 2636 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 2637 le32_encode_bits(op->channel, 2638 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 2639 h2c->w0 |= le32_encode_bits(op->band_type, 2640 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 2641 } 2642 2643 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2644 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2645 H2C_FUNC_SCANOFLD, 1, 1, 2646 len); 2647 2648 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD); 2649 2650 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, 
wait, cond); 2651 if (ret) { 2652 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 2653 return ret; 2654 } 2655 2656 return 0; 2657 } 2658 2659 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 2660 struct rtw89_fw_h2c_rf_reg_info *info, 2661 u16 len, u8 page) 2662 { 2663 struct sk_buff *skb; 2664 u8 class = info->rf_path == RF_PATH_A ? 2665 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 2666 int ret; 2667 2668 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2669 if (!skb) { 2670 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 2671 return -ENOMEM; 2672 } 2673 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 2674 2675 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2676 H2C_CAT_OUTSRC, class, page, 0, 0, 2677 len); 2678 2679 ret = rtw89_h2c_tx(rtwdev, skb, false); 2680 if (ret) { 2681 rtw89_err(rtwdev, "failed to send h2c\n"); 2682 goto fail; 2683 } 2684 2685 return 0; 2686 fail: 2687 dev_kfree_skb_any(skb); 2688 2689 return ret; 2690 } 2691 2692 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 2693 { 2694 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2695 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 2696 struct rtw89_fw_h2c_rf_get_mccch *mccch; 2697 struct sk_buff *skb; 2698 int ret; 2699 2700 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 2701 if (!skb) { 2702 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2703 return -ENOMEM; 2704 } 2705 skb_put(skb, sizeof(*mccch)); 2706 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 2707 2708 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 2709 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 2710 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 2711 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 2712 mccch->current_channel = cpu_to_le32(chan->channel); 2713 mccch->current_band_type = cpu_to_le32(chan->band_type); 2714 2715 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2716 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 2717 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 2718 sizeof(*mccch)); 2719 2720 ret = rtw89_h2c_tx(rtwdev, skb, false); 2721 if (ret) { 2722 rtw89_err(rtwdev, "failed to send h2c\n"); 2723 goto fail; 2724 } 2725 2726 return 0; 2727 fail: 2728 dev_kfree_skb_any(skb); 2729 2730 return ret; 2731 } 2732 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 2733 2734 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 2735 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 2736 bool rack, bool dack) 2737 { 2738 struct sk_buff *skb; 2739 int ret; 2740 2741 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2742 if (!skb) { 2743 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 2744 return -ENOMEM; 2745 } 2746 skb_put_data(skb, buf, len); 2747 2748 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2749 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 2750 len); 2751 2752 ret = rtw89_h2c_tx(rtwdev, skb, false); 2753 if (ret) { 2754 rtw89_err(rtwdev, "failed to send h2c\n"); 2755 goto fail; 2756 } 2757 2758 return 0; 2759 fail: 2760 dev_kfree_skb_any(skb); 2761 2762 return ret; 2763 } 2764 2765 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 2766 { 2767 struct sk_buff *skb; 2768 int ret; 2769 2770 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 2771 if (!skb) { 2772 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 2773 return -ENOMEM; 2774 } 2775 skb_put_data(skb, buf, len); 2776 2777 ret = rtw89_h2c_tx(rtwdev, skb, false); 2778 if (ret) { 2779 rtw89_err(rtwdev, "failed to send h2c\n"); 2780 goto fail; 2781 
} 2782 2783 return 0; 2784 fail: 2785 dev_kfree_skb_any(skb); 2786 2787 return ret; 2788 } 2789 2790 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 2791 { 2792 struct rtw89_early_h2c *early_h2c; 2793 2794 lockdep_assert_held(&rtwdev->mutex); 2795 2796 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 2797 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 2798 } 2799 } 2800 2801 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 2802 { 2803 struct rtw89_early_h2c *early_h2c, *tmp; 2804 2805 mutex_lock(&rtwdev->mutex); 2806 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 2807 list_del(&early_h2c->list); 2808 kfree(early_h2c->h2c); 2809 kfree(early_h2c); 2810 } 2811 mutex_unlock(&rtwdev->mutex); 2812 } 2813 2814 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 2815 { 2816 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2817 2818 attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); 2819 attr->class = RTW89_GET_C2H_CLASS(c2h->data); 2820 attr->func = RTW89_GET_C2H_FUNC(c2h->data); 2821 attr->len = RTW89_GET_C2H_LEN(c2h->data); 2822 } 2823 2824 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 2825 struct sk_buff *c2h) 2826 { 2827 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2828 u8 category = attr->category; 2829 u8 class = attr->class; 2830 u8 func = attr->func; 2831 2832 switch (category) { 2833 default: 2834 return false; 2835 case RTW89_C2H_CAT_MAC: 2836 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 2837 } 2838 } 2839 2840 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 2841 { 2842 rtw89_fw_c2h_parse_attr(c2h); 2843 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 2844 goto enqueue; 2845 2846 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 2847 dev_kfree_skb_any(c2h); 2848 return; 2849 2850 enqueue: 2851 skb_queue_tail(&rtwdev->c2h_queue, c2h); 2852 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 2853 } 2854 2855 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 2856 struct sk_buff *skb) 2857 { 2858 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 2859 u8 category = attr->category; 2860 u8 class = attr->class; 2861 u8 func = attr->func; 2862 u16 len = attr->len; 2863 bool dump = true; 2864 2865 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2866 return; 2867 2868 switch (category) { 2869 case RTW89_C2H_CAT_TEST: 2870 break; 2871 case RTW89_C2H_CAT_MAC: 2872 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 2873 if (class == RTW89_MAC_C2H_CLASS_INFO && 2874 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 2875 dump = false; 2876 break; 2877 case RTW89_C2H_CAT_OUTSRC: 2878 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 2879 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 2880 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 2881 else 2882 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 2883 break; 2884 } 2885 2886 if (dump) 2887 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 2888 } 2889 2890 void rtw89_fw_c2h_work(struct work_struct *work) 2891 { 2892 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2893 c2h_work); 2894 struct sk_buff *skb, *tmp; 2895 2896 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 2897 skb_unlink(skb, &rtwdev->c2h_queue); 2898 mutex_lock(&rtwdev->mutex); 2899 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 2900 mutex_unlock(&rtwdev->mutex); 2901 dev_kfree_skb_any(skb); 2902 } 2903 } 2904 2905 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 2906 struct rtw89_mac_h2c_info *info) 2907 { 2908 
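	/* Write a small H2C message directly through the chip's H2C registers:
	 * poll the control register until the firmware has consumed the
	 * previous message, write the header and payload words, bump the
	 * sequence counter and set the trigger bit to notify the firmware.
	 */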
const struct rtw89_chip_info *chip = rtwdev->chip; 2909 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2910 const u32 *h2c_reg = chip->h2c_regs; 2911 u8 i, val, len; 2912 int ret; 2913 2914 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 2915 rtwdev, chip->h2c_ctrl_reg); 2916 if (ret) { 2917 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 2918 return ret; 2919 } 2920 2921 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 2922 sizeof(info->u.h2creg[0])); 2923 2924 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 2925 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 2926 2927 for (i = 0; i < RTW89_H2CREG_MAX; i++) 2928 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 2929 2930 fw_info->h2c_counter++; 2931 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 2932 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 2933 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 2934 2935 return 0; 2936 } 2937 2938 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 2939 struct rtw89_mac_c2h_info *info) 2940 { 2941 const struct rtw89_chip_info *chip = rtwdev->chip; 2942 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2943 const u32 *c2h_reg = chip->c2h_regs; 2944 u32 ret; 2945 u8 i, val; 2946 2947 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 2948 2949 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 2950 RTW89_C2H_TIMEOUT, false, rtwdev, 2951 chip->c2h_ctrl_reg); 2952 if (ret) { 2953 rtw89_warn(rtwdev, "c2h reg timeout\n"); 2954 return ret; 2955 } 2956 2957 for (i = 0; i < RTW89_C2HREG_MAX; i++) 2958 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 2959 2960 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 2961 2962 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 2963 info->content_len = 2964 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 2965 RTW89_C2HREG_HDR_LEN; 2966 2967 fw_info->c2h_counter++; 2968 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 2969 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 2970 2971 return 0; 2972 } 2973 2974 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 2975 struct rtw89_mac_h2c_info *h2c_info, 2976 struct rtw89_mac_c2h_info *c2h_info) 2977 { 2978 u32 ret; 2979 2980 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 2981 lockdep_assert_held(&rtwdev->mutex); 2982 2983 if (!h2c_info && !c2h_info) 2984 return -EINVAL; 2985 2986 if (!h2c_info) 2987 goto recv_c2h; 2988 2989 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 2990 if (ret) 2991 return ret; 2992 2993 recv_c2h: 2994 if (!c2h_info) 2995 return 0; 2996 2997 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 2998 if (ret) 2999 return ret; 3000 3001 return 0; 3002 } 3003 3004 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 3005 { 3006 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 3007 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 3008 return; 3009 } 3010 3011 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 3012 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 3013 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 3014 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 3015 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 3016 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 3017 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 3018 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 3019 3020 rtw89_fw_prog_cnt_dump(rtwdev); 3021 } 3022 3023 static void 
rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 3024 { 3025 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3026 struct rtw89_pktofld_info *info, *tmp; 3027 u8 idx; 3028 3029 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 3030 if (!(rtwdev->chip->support_bands & BIT(idx))) 3031 continue; 3032 3033 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 3034 if (test_bit(info->id, rtwdev->pkt_offload)) 3035 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 3036 list_del(&info->list); 3037 kfree(info); 3038 } 3039 } 3040 } 3041 3042 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 3043 struct rtw89_vif *rtwvif, 3044 struct rtw89_pktofld_info *info, 3045 enum nl80211_band band, u8 ssid_idx) 3046 { 3047 struct cfg80211_scan_request *req = rtwvif->scan_req; 3048 3049 if (band != NL80211_BAND_6GHZ) 3050 return false; 3051 3052 if (req->ssids[ssid_idx].ssid_len) { 3053 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 3054 req->ssids[ssid_idx].ssid_len); 3055 info->ssid_len = req->ssids[ssid_idx].ssid_len; 3056 return false; 3057 } else { 3058 return true; 3059 } 3060 } 3061 3062 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 3063 struct rtw89_vif *rtwvif, 3064 struct sk_buff *skb, u8 ssid_idx) 3065 { 3066 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3067 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3068 struct rtw89_pktofld_info *info; 3069 struct sk_buff *new; 3070 int ret = 0; 3071 u8 band; 3072 3073 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 3074 if (!(rtwdev->chip->support_bands & BIT(band))) 3075 continue; 3076 3077 new = skb_copy(skb, GFP_KERNEL); 3078 if (!new) { 3079 ret = -ENOMEM; 3080 goto out; 3081 } 3082 skb_put_data(new, ies->ies[band], ies->len[band]); 3083 skb_put_data(new, ies->common_ies, ies->common_ie_len); 3084 3085 info = kzalloc(sizeof(*info), GFP_KERNEL); 3086 if (!info) { 3087 ret = -ENOMEM; 3088 kfree_skb(new); 3089 goto out; 3090 } 3091 3092 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 3093 ssid_idx)) { 3094 kfree_skb(new); 3095 kfree(info); 3096 goto out; 3097 } 3098 3099 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 3100 if (ret) { 3101 kfree_skb(new); 3102 kfree(info); 3103 goto out; 3104 } 3105 3106 list_add_tail(&info->list, &scan_info->pkt_list[band]); 3107 kfree_skb(new); 3108 } 3109 out: 3110 return ret; 3111 } 3112 3113 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 3114 struct rtw89_vif *rtwvif) 3115 { 3116 struct cfg80211_scan_request *req = rtwvif->scan_req; 3117 struct sk_buff *skb; 3118 u8 num = req->n_ssids, i; 3119 int ret; 3120 3121 for (i = 0; i < num; i++) { 3122 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3123 req->ssids[i].ssid, 3124 req->ssids[i].ssid_len, 3125 req->ie_len); 3126 if (!skb) 3127 return -ENOMEM; 3128 3129 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 3130 kfree_skb(skb); 3131 3132 if (ret) 3133 return ret; 3134 } 3135 3136 return 0; 3137 } 3138 3139 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 3140 struct cfg80211_scan_request *req, 3141 struct rtw89_mac_chinfo *ch_info) 3142 { 3143 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3144 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3145 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3146 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3147 struct cfg80211_scan_6ghz_params *params; 3148 struct rtw89_pktofld_info *info, *tmp; 3149 struct ieee80211_hdr 
*hdr; 3150 struct sk_buff *skb; 3151 bool found; 3152 int ret = 0; 3153 u8 i; 3154 3155 if (!req->n_6ghz_params) 3156 return 0; 3157 3158 for (i = 0; i < req->n_6ghz_params; i++) { 3159 params = &req->scan_6ghz_params[i]; 3160 3161 if (req->channels[params->channel_idx]->hw_value != 3162 ch_info->pri_ch) 3163 continue; 3164 3165 found = false; 3166 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 3167 if (ether_addr_equal(tmp->bssid, params->bssid)) { 3168 found = true; 3169 break; 3170 } 3171 } 3172 if (found) 3173 continue; 3174 3175 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3176 NULL, 0, req->ie_len); 3177 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 3178 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 3179 hdr = (struct ieee80211_hdr *)skb->data; 3180 ether_addr_copy(hdr->addr3, params->bssid); 3181 3182 info = kzalloc(sizeof(*info), GFP_KERNEL); 3183 if (!info) { 3184 ret = -ENOMEM; 3185 kfree_skb(skb); 3186 goto out; 3187 } 3188 3189 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 3190 if (ret) { 3191 kfree_skb(skb); 3192 kfree(info); 3193 goto out; 3194 } 3195 3196 ether_addr_copy(info->bssid, params->bssid); 3197 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 3198 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 3199 3200 ch_info->tx_pkt = true; 3201 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 3202 3203 kfree_skb(skb); 3204 } 3205 3206 out: 3207 return ret; 3208 } 3209 3210 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 3211 int ssid_num, 3212 struct rtw89_mac_chinfo *ch_info) 3213 { 3214 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3215 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3216 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3217 struct cfg80211_scan_request *req = rtwvif->scan_req; 3218 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3219 struct rtw89_pktofld_info *info; 3220 u8 band, probe_count = 0; 3221 int ret; 3222 3223 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 3224 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 3225 ch_info->bw = RTW89_SCAN_WIDTH; 3226 ch_info->tx_pkt = true; 3227 ch_info->cfg_tx_pwr = false; 3228 ch_info->tx_pwr_idx = 0; 3229 ch_info->tx_null = false; 3230 ch_info->pause_data = false; 3231 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 3232 3233 if (ch_info->ch_band == RTW89_BAND_6G) { 3234 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 3235 !ch_info->is_psc) { 3236 ch_info->tx_pkt = false; 3237 if (!req->duration_mandatory) 3238 ch_info->period -= RTW89_DWELL_TIME_6G; 3239 } 3240 } 3241 3242 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 3243 if (ret) 3244 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 3245 3246 if (ssid_num) { 3247 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 3248 3249 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 3250 if (info->channel_6ghz && 3251 ch_info->pri_ch != info->channel_6ghz) 3252 continue; 3253 ch_info->pkt_id[probe_count++] = info->id; 3254 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 3255 break; 3256 } 3257 ch_info->num_pkt = probe_count; 3258 } 3259 3260 switch (chan_type) { 3261 case RTW89_CHAN_OPERATE: 3262 ch_info->central_ch = op->channel; 3263 ch_info->pri_ch = op->primary_channel; 3264 ch_info->ch_band = op->band_type; 3265 ch_info->bw = op->band_width; 3266 ch_info->tx_null = true; 3267 ch_info->num_pkt = 0; 3268 break; 3269 case RTW89_CHAN_DFS: 
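		/* Radar/no-IR channels are scanned more conservatively:
		 * outside 6 GHz the per-channel period is raised to at least
		 * RTW89_DFS_CHAN_TIME, and an explicit dwell time is set.
		 */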
3270 if (ch_info->ch_band != RTW89_BAND_6G) 3271 ch_info->period = max_t(u8, ch_info->period, 3272 RTW89_DFS_CHAN_TIME); 3273 ch_info->dwell_time = RTW89_DWELL_TIME; 3274 break; 3275 case RTW89_CHAN_ACTIVE: 3276 break; 3277 default: 3278 rtw89_err(rtwdev, "Channel type out of bound\n"); 3279 } 3280 } 3281 3282 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 3283 struct rtw89_vif *rtwvif, bool connected) 3284 { 3285 struct cfg80211_scan_request *req = rtwvif->scan_req; 3286 struct rtw89_mac_chinfo *ch_info, *tmp; 3287 struct ieee80211_channel *channel; 3288 struct list_head chan_list; 3289 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 3290 int list_len, off_chan_time = 0; 3291 enum rtw89_chan_type type; 3292 int ret = 0; 3293 u32 idx; 3294 3295 INIT_LIST_HEAD(&chan_list); 3296 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 3297 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 3298 idx++, list_len++) { 3299 channel = req->channels[idx]; 3300 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 3301 if (!ch_info) { 3302 ret = -ENOMEM; 3303 goto out; 3304 } 3305 3306 if (req->duration_mandatory) 3307 ch_info->period = req->duration; 3308 else if (channel->band == NL80211_BAND_6GHZ) 3309 ch_info->period = RTW89_CHANNEL_TIME_6G + 3310 RTW89_DWELL_TIME_6G; 3311 else 3312 ch_info->period = RTW89_CHANNEL_TIME; 3313 3314 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 3315 ch_info->central_ch = channel->hw_value; 3316 ch_info->pri_ch = channel->hw_value; 3317 ch_info->rand_seq_num = random_seq; 3318 ch_info->is_psc = cfg80211_channel_is_psc(channel); 3319 3320 if (channel->flags & 3321 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 3322 type = RTW89_CHAN_DFS; 3323 else 3324 type = RTW89_CHAN_ACTIVE; 3325 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 3326 3327 if (connected && 3328 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 3329 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3330 if (!tmp) { 3331 ret = -ENOMEM; 3332 kfree(ch_info); 3333 goto out; 3334 } 3335 3336 type = RTW89_CHAN_OPERATE; 3337 tmp->period = req->duration_mandatory ? 
3338 req->duration : RTW89_CHANNEL_TIME; 3339 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 3340 list_add_tail(&tmp->list, &chan_list); 3341 off_chan_time = 0; 3342 list_len++; 3343 } 3344 list_add_tail(&ch_info->list, &chan_list); 3345 off_chan_time += ch_info->period; 3346 } 3347 rtwdev->scan_info.last_chan_idx = idx; 3348 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 3349 3350 out: 3351 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 3352 list_del(&ch_info->list); 3353 kfree(ch_info); 3354 } 3355 3356 return ret; 3357 } 3358 3359 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 3360 struct rtw89_vif *rtwvif, bool connected) 3361 { 3362 int ret; 3363 3364 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 3365 if (ret) { 3366 rtw89_err(rtwdev, "Update probe request failed\n"); 3367 goto out; 3368 } 3369 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected); 3370 out: 3371 return ret; 3372 } 3373 3374 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3375 struct ieee80211_scan_request *scan_req) 3376 { 3377 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3378 struct cfg80211_scan_request *req = &scan_req->req; 3379 u32 rx_fltr = rtwdev->hal.rx_fltr; 3380 u8 mac_addr[ETH_ALEN]; 3381 3382 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 3383 rtwdev->scan_info.scanning_vif = vif; 3384 rtwdev->scan_info.last_chan_idx = 0; 3385 rtwvif->scan_ies = &scan_req->ies; 3386 rtwvif->scan_req = req; 3387 ieee80211_stop_queues(rtwdev->hw); 3388 3389 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 3390 get_random_mask_addr(mac_addr, req->mac_addr, 3391 req->mac_addr_mask); 3392 else 3393 ether_addr_copy(mac_addr, vif->addr); 3394 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 3395 3396 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 3397 rx_fltr &= ~B_AX_A_BC; 3398 rx_fltr &= ~B_AX_A_A1_MATCH; 3399 rtw89_write32_mask(rtwdev, 3400 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3401 B_AX_RX_FLTR_CFG_MASK, 3402 rx_fltr); 3403 } 3404 3405 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3406 bool aborted) 3407 { 3408 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3409 struct cfg80211_scan_info info = { 3410 .aborted = aborted, 3411 }; 3412 struct rtw89_vif *rtwvif; 3413 3414 if (!vif) 3415 return; 3416 3417 rtw89_write32_mask(rtwdev, 3418 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3419 B_AX_RX_FLTR_CFG_MASK, 3420 rtwdev->hal.rx_fltr); 3421 3422 rtw89_core_scan_complete(rtwdev, vif, true); 3423 ieee80211_scan_completed(rtwdev->hw, &info); 3424 ieee80211_wake_queues(rtwdev->hw); 3425 3426 rtw89_release_pkt_list(rtwdev); 3427 rtwvif = (struct rtw89_vif *)vif->drv_priv; 3428 rtwvif->scan_req = NULL; 3429 rtwvif->scan_ies = NULL; 3430 scan_info->last_chan_idx = 0; 3431 scan_info->scanning_vif = NULL; 3432 3433 rtw89_set_channel(rtwdev); 3434 } 3435 3436 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 3437 { 3438 rtw89_hw_scan_offload(rtwdev, vif, false); 3439 rtw89_hw_scan_complete(rtwdev, vif, true); 3440 } 3441 3442 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3443 bool enable) 3444 { 3445 struct rtw89_scan_option opt = {0}; 3446 struct rtw89_vif *rtwvif; 3447 bool connected; 3448 int ret = 0; 3449 3450 rtwvif = vif ? 
(struct rtw89_vif *)vif->drv_priv : NULL; 3451 if (!rtwvif) 3452 return -EINVAL; 3453 3454 /* This variable implies connected or during attempt to connect */ 3455 connected = !is_zero_ether_addr(rtwvif->bssid); 3456 opt.enable = enable; 3457 opt.target_ch_mode = connected; 3458 if (enable) { 3459 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 3460 if (ret) 3461 goto out; 3462 } 3463 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 3464 out: 3465 return ret; 3466 } 3467 3468 #define H2C_FW_CPU_EXCEPTION_LEN 4 3469 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 3470 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 3471 { 3472 struct sk_buff *skb; 3473 int ret; 3474 3475 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 3476 if (!skb) { 3477 rtw89_err(rtwdev, 3478 "failed to alloc skb for fw cpu exception\n"); 3479 return -ENOMEM; 3480 } 3481 3482 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 3483 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 3484 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 3485 3486 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3487 H2C_CAT_TEST, 3488 H2C_CL_FW_STATUS_TEST, 3489 H2C_FUNC_CPU_EXCEPTION, 0, 0, 3490 H2C_FW_CPU_EXCEPTION_LEN); 3491 3492 ret = rtw89_h2c_tx(rtwdev, skb, false); 3493 if (ret) { 3494 rtw89_err(rtwdev, "failed to send h2c\n"); 3495 goto fail; 3496 } 3497 3498 return 0; 3499 3500 fail: 3501 dev_kfree_skb_any(skb); 3502 return ret; 3503 } 3504 3505 #define H2C_PKT_DROP_LEN 24 3506 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 3507 const struct rtw89_pkt_drop_params *params) 3508 { 3509 struct sk_buff *skb; 3510 int ret; 3511 3512 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 3513 if (!skb) { 3514 rtw89_err(rtwdev, 3515 "failed to alloc skb for packet drop\n"); 3516 return -ENOMEM; 3517 } 3518 3519 switch (params->sel) { 3520 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 3521 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 3522 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 3523 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 3524 case RTW89_PKT_DROP_SEL_BAND_ONCE: 3525 break; 3526 default: 3527 rtw89_debug(rtwdev, RTW89_DBG_FW, 3528 "H2C of pkt drop might not fully support sel: %d yet\n", 3529 params->sel); 3530 break; 3531 } 3532 3533 skb_put(skb, H2C_PKT_DROP_LEN); 3534 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 3535 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 3536 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 3537 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 3538 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 3539 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 3540 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 3541 params->macid_band_sel[0]); 3542 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 3543 params->macid_band_sel[1]); 3544 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 3545 params->macid_band_sel[2]); 3546 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 3547 params->macid_band_sel[3]); 3548 3549 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3550 H2C_CAT_MAC, 3551 H2C_CL_MAC_FW_OFLD, 3552 H2C_FUNC_PKT_DROP, 0, 0, 3553 H2C_PKT_DROP_LEN); 3554 3555 ret = rtw89_h2c_tx(rtwdev, skb, false); 3556 if (ret) { 3557 rtw89_err(rtwdev, "failed to send h2c\n"); 3558 goto fail; 3559 } 3560 3561 return 0; 3562 3563 fail: 3564 dev_kfree_skb_any(skb); 3565 return ret; 3566 } 3567 3568 #define H2C_KEEP_ALIVE_LEN 4 3569 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3570 bool 
enable) 3571 { 3572 struct sk_buff *skb; 3573 u8 pkt_id = 0; 3574 int ret; 3575 3576 if (enable) { 3577 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 3578 RTW89_PKT_OFLD_TYPE_NULL_DATA, 3579 &pkt_id); 3580 if (ret) 3581 return -EPERM; 3582 } 3583 3584 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 3585 if (!skb) { 3586 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3587 return -ENOMEM; 3588 } 3589 3590 skb_put(skb, H2C_KEEP_ALIVE_LEN); 3591 3592 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 3593 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 3594 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 3595 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 3596 3597 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3598 H2C_CAT_MAC, 3599 H2C_CL_MAC_WOW, 3600 H2C_FUNC_KEEP_ALIVE, 0, 1, 3601 H2C_KEEP_ALIVE_LEN); 3602 3603 ret = rtw89_h2c_tx(rtwdev, skb, false); 3604 if (ret) { 3605 rtw89_err(rtwdev, "failed to send h2c\n"); 3606 goto fail; 3607 } 3608 3609 return 0; 3610 3611 fail: 3612 dev_kfree_skb_any(skb); 3613 3614 return ret; 3615 } 3616 3617 #define H2C_DISCONNECT_DETECT_LEN 8 3618 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 3619 struct rtw89_vif *rtwvif, bool enable) 3620 { 3621 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3622 struct sk_buff *skb; 3623 u8 macid = rtwvif->mac_id; 3624 int ret; 3625 3626 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 3627 if (!skb) { 3628 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3629 return -ENOMEM; 3630 } 3631 3632 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 3633 3634 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 3635 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 3636 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 3637 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 3638 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 3639 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 3640 } 3641 3642 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3643 H2C_CAT_MAC, 3644 H2C_CL_MAC_WOW, 3645 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 3646 H2C_DISCONNECT_DETECT_LEN); 3647 3648 ret = rtw89_h2c_tx(rtwdev, skb, false); 3649 if (ret) { 3650 rtw89_err(rtwdev, "failed to send h2c\n"); 3651 goto fail; 3652 } 3653 3654 return 0; 3655 3656 fail: 3657 dev_kfree_skb_any(skb); 3658 3659 return ret; 3660 } 3661 3662 #define H2C_WOW_GLOBAL_LEN 8 3663 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3664 bool enable) 3665 { 3666 struct sk_buff *skb; 3667 u8 macid = rtwvif->mac_id; 3668 int ret; 3669 3670 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); 3671 if (!skb) { 3672 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3673 return -ENOMEM; 3674 } 3675 3676 skb_put(skb, H2C_WOW_GLOBAL_LEN); 3677 3678 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); 3679 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); 3680 3681 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3682 H2C_CAT_MAC, 3683 H2C_CL_MAC_WOW, 3684 H2C_FUNC_WOW_GLOBAL, 0, 1, 3685 H2C_WOW_GLOBAL_LEN); 3686 3687 ret = rtw89_h2c_tx(rtwdev, skb, false); 3688 if (ret) { 3689 rtw89_err(rtwdev, "failed to send h2c\n"); 3690 goto fail; 3691 } 3692 3693 return 0; 3694 3695 fail: 3696 dev_kfree_skb_any(skb); 3697 3698 return ret; 3699 } 3700 3701 #define H2C_WAKEUP_CTRL_LEN 4 3702 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 3703 struct rtw89_vif *rtwvif, 3704 bool enable) 3705 { 3706 struct rtw89_wow_param 
	*rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WAKEUP_CTRL_LEN);

	if (rtw_wow->pattern_cnt)
		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);

	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
			      H2C_WAKEUP_CTRL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_CAM_UPD_LEN 24
int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
			    struct rtw89_wow_cam_info *cam_info)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WOW_CAM_UPD_LEN);

	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
	if (cam_info->valid) {
		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
							     cam_info->negative_pattern_match);
		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
						   cam_info->skip_mac_hdr);
		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
	}
	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
			      H2C_WOW_CAM_UPD_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Return < 0 if a failure happens while waiting for the condition.
 * Return 0 when waiting for the condition succeeds.
 * Return > 0 if the wait is considered unreachable due to driver/FW design,
 * where 1 means the wait was skipped because SER is in progress.
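 *
 * In every case the skb is consumed: if rtw89_h2c_tx() fails it is freed
 * here, otherwise ownership passes to the TX path. Callers must not free it
 * again.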
/* Return < 0, if failures happen during waiting for the condition.
 * Return 0, when waiting for the condition succeeds.
 * Return > 0, if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
 */
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond)
{
	int ret;

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
		return 1;

	return rtw89_wait_for_cond(wait, cond);
}

#define H2C_ADD_MCC_LEN 16
int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mcc_add_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for add mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_ADD_MCC_LEN);
	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_ADD_MCC, 0, 0,
			      H2C_ADD_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

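/* Descriptive note (added for clarity): the start command below kicks off
 * an MCC group previously configured via rtw89_fw_h2c_add_mcc(); the
 * old_group/old_group_action fields and the tsf_low/tsf_high pair
 * presumably tell firmware how to retire an existing group and at which
 * TSF the new schedule takes effect.
 */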
#define H2C_START_MCC_LEN 12
int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
			   const struct rtw89_fw_mcc_start_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for start mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_START_MCC_LEN);
	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_START_MCC, 0, 0,
			      H2C_START_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_STOP_MCC_LEN 4
int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
			  bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for stop mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_STOP_MCC_LEN);
	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_STOP_MCC, 0, 0,
			      H2C_STOP_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_DEL_MCC_GROUP_LEN 4
int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
			       bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for del mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
			      H2C_DEL_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_RESET_MCC_GROUP_LEN 4
int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for reset mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
			      H2C_RESET_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

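/* Descriptive note (added for clarity): the request below asks firmware to
 * report the current TSF values for macid_x/macid_y of the group; on
 * success the report is delivered through the MCC wait (wait->data.buf)
 * and copied into *rpt.
 */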
#define H2C_MCC_REQ_TSF_LEN 4
int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mcc_tsf_req *req,
			     struct rtw89_mac_mcc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_mac_mcc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int cond;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
			      H2C_MCC_REQ_TSF_LEN);

	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret)
		return ret;

	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}

#define H2C_MCC_MACID_BITMAP_DSC_LEN 4
int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
				  u8 *bitmap)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 map_len;
	u8 h2c_len;

	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
	map_len = RTW89_MAX_MAC_ID_NUM / 8;
	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc macid bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, h2c_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
			      h2c_len);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_SYNC_LEN 4
int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
			  u8 target, u8 offset)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SYNC_LEN);
	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SYNC, 0, 0,
			      H2C_MCC_SYNC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

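/* Descriptive note (added for clarity): the command below updates the MCC
 * slot durations, duration_x/duration_y for macid_x/macid_y, taking effect
 * from the 64-bit start TSF given by start_tsf_low/start_tsf_high
 * (presumably in firmware time units).
 */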
#define H2C_MCC_SET_DURATION_LEN 20
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mcc_duration *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc set duration\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
						       p->start_tsf_low);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
							p->start_tsf_high);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
			      H2C_MCC_SET_DURATION_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}