1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "chan.h" 7 #include "coex.h" 8 #include "debug.h" 9 #include "fw.h" 10 #include "mac.h" 11 #include "phy.h" 12 #include "reg.h" 13 14 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 15 bool header) 16 { 17 struct sk_buff *skb; 18 u32 header_len = 0; 19 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 20 21 if (header) 22 header_len = H2C_HEADER_LEN; 23 24 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 25 if (!skb) 26 return NULL; 27 skb_reserve(skb, header_len + h2c_desc_size); 28 memset(skb->data, 0, len); 29 30 return skb; 31 } 32 33 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 34 { 35 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 36 } 37 38 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 39 { 40 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 41 } 42 43 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) 44 { 45 u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); 46 47 return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); 48 } 49 50 #define FWDL_WAIT_CNT 400000 51 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) 52 { 53 u8 val; 54 int ret; 55 56 ret = read_poll_timeout_atomic(_fw_get_rdy, val, 57 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 58 1, FWDL_WAIT_CNT, false, rtwdev); 59 if (ret) { 60 switch (val) { 61 case RTW89_FWDL_CHECKSUM_FAIL: 62 rtw89_err(rtwdev, "fw checksum fail\n"); 63 return -EINVAL; 64 65 case RTW89_FWDL_SECURITY_FAIL: 66 rtw89_err(rtwdev, "fw security fail\n"); 67 return -EINVAL; 68 69 case RTW89_FWDL_CV_NOT_MATCH: 70 rtw89_err(rtwdev, "fw cv not match\n"); 71 return -EINVAL; 72 73 default: 74 return -EBUSY; 75 } 76 } 77 78 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 79 80 return 0; 81 } 82 83 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 84 struct rtw89_fw_bin_info *info) 85 { 86 struct rtw89_fw_hdr_section_info *section_info; 87 const u8 *fw_end = fw + len; 88 const u8 *bin; 89 u32 i; 90 91 if (!info) 92 return -EINVAL; 93 94 info->section_num = GET_FW_HDR_SEC_NUM(fw); 95 info->hdr_len = RTW89_FW_HDR_SIZE + 96 info->section_num * RTW89_FW_SECTION_HDR_SIZE; 97 98 bin = fw + info->hdr_len; 99 100 /* jump to section header */ 101 fw += RTW89_FW_HDR_SIZE; 102 section_info = info->section_info; 103 for (i = 0; i < info->section_num; i++) { 104 section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw); 105 if (GET_FWSECTION_HDR_CHECKSUM(fw)) 106 section_info->len += FWDL_SECTION_CHKSUM_LEN; 107 section_info->redl = GET_FWSECTION_HDR_REDL(fw); 108 section_info->dladdr = 109 GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff; 110 section_info->addr = bin; 111 bin += section_info->len; 112 fw += RTW89_FW_SECTION_HDR_SIZE; 113 section_info++; 114 } 115 116 if (fw_end != bin) { 117 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 118 return -EINVAL; 119 } 120 121 return 0; 122 } 123 124 static 125 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 126 struct rtw89_fw_suit *fw_suit) 127 { 128 struct rtw89_fw_info *fw_info = &rtwdev->fw; 129 const u8 *mfw = fw_info->firmware->data; 130 u32 mfw_len = fw_info->firmware->size; 131 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 132 const struct rtw89_mfw_info *mfw_info; 133 int i; 134 135 if (mfw_hdr->sig != RTW89_MFW_SIG) { 136 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 137 /* legacy firmware support normal type only */ 
138 if (type != RTW89_FW_NORMAL) 139 return -EINVAL; 140 fw_suit->data = mfw; 141 fw_suit->size = mfw_len; 142 return 0; 143 } 144 145 for (i = 0; i < mfw_hdr->fw_nr; i++) { 146 mfw_info = &mfw_hdr->info[i]; 147 if (mfw_info->cv != rtwdev->hal.cv || 148 mfw_info->type != type || 149 mfw_info->mp) 150 continue; 151 152 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 153 fw_suit->size = le32_to_cpu(mfw_info->size); 154 return 0; 155 } 156 157 rtw89_err(rtwdev, "no suitable firmware found\n"); 158 return -ENOENT; 159 } 160 161 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 162 enum rtw89_fw_type type, 163 struct rtw89_fw_suit *fw_suit) 164 { 165 const u8 *hdr = fw_suit->data; 166 167 fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr); 168 fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr); 169 fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr); 170 fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr); 171 fw_suit->build_year = GET_FW_HDR_YEAR(hdr); 172 fw_suit->build_mon = GET_FW_HDR_MONTH(hdr); 173 fw_suit->build_date = GET_FW_HDR_DATE(hdr); 174 fw_suit->build_hour = GET_FW_HDR_HOUR(hdr); 175 fw_suit->build_min = GET_FW_HDR_MIN(hdr); 176 fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr); 177 178 rtw89_info(rtwdev, 179 "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n", 180 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 181 fw_suit->sub_idex, fw_suit->cmd_ver, type); 182 } 183 184 static 185 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 186 { 187 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 188 int ret; 189 190 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit); 191 if (ret) 192 return ret; 193 194 rtw89_fw_update_ver(rtwdev, type, fw_suit); 195 196 return 0; 197 } 198 199 #define __DEF_FW_FEAT_COND(__cond, __op) \ 200 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 201 { \ 202 return suit_ver_code __op comp_ver_code; \ 203 } 204 205 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 206 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 207 208 struct __fw_feat_cfg { 209 enum rtw89_core_chip_id chip_id; 210 enum rtw89_fw_feature feature; 211 u32 ver_code; 212 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 213 }; 214 215 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 216 { \ 217 .chip_id = _chip, \ 218 .feature = RTW89_FW_FEATURE_ ## _feat, \ 219 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 220 .cond = __fw_feat_cond_ ## _cond, \ 221 } 222 223 static const struct __fw_feat_cfg fw_feat_tbl[] = { 224 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 225 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 226 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 227 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 228 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP), 229 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP), 230 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 231 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 232 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 233 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 234 }; 235 236 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 237 { 238 const struct rtw89_chip_info *chip = rtwdev->chip; 239 const struct __fw_feat_cfg *ent; 240 const struct rtw89_fw_suit *fw_suit; 241 u32 suit_ver_code; 242 int i; 243 244 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 245 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 246 247 for (i 
= 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 248 ent = &fw_feat_tbl[i]; 249 if (chip->chip_id != ent->chip_id) 250 continue; 251 252 if (ent->cond(suit_ver_code, ent->ver_code)) 253 RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw); 254 } 255 } 256 257 void rtw89_early_fw_feature_recognize(struct device *device, 258 const struct rtw89_chip_info *chip, 259 u32 *early_feat_map) 260 { 261 union { 262 struct rtw89_mfw_hdr mfw_hdr; 263 u8 fw_hdr[RTW89_FW_HDR_SIZE]; 264 } buf = {}; 265 const struct firmware *firmware; 266 u32 ver_code; 267 int ret; 268 int i; 269 270 ret = request_partial_firmware_into_buf(&firmware, chip->fw_name, 271 device, &buf, sizeof(buf), 0); 272 if (ret) { 273 dev_err(device, "failed to early request firmware: %d\n", ret); 274 return; 275 } 276 277 ver_code = buf.mfw_hdr.sig != RTW89_MFW_SIG ? 278 RTW89_FW_HDR_VER_CODE(&buf.fw_hdr) : 279 RTW89_MFW_HDR_VER_CODE(&buf.mfw_hdr); 280 if (!ver_code) 281 goto out; 282 283 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 284 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 285 286 if (chip->chip_id != ent->chip_id) 287 continue; 288 289 if (ent->cond(ver_code, ent->ver_code)) 290 *early_feat_map |= BIT(ent->feature); 291 } 292 293 out: 294 release_firmware(firmware); 295 } 296 297 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 298 { 299 int ret; 300 301 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL); 302 if (ret) 303 return ret; 304 305 /* It still works if wowlan firmware isn't existing. */ 306 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN); 307 308 rtw89_fw_recognize_features(rtwdev); 309 310 return 0; 311 } 312 313 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 314 u8 type, u8 cat, u8 class, u8 func, 315 bool rack, bool dack, u32 len) 316 { 317 struct fwcmd_hdr *hdr; 318 319 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 320 321 if (!(rtwdev->fw.h2c_seq % 4)) 322 rack = true; 323 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 324 FIELD_PREP(H2C_HDR_CAT, cat) | 325 FIELD_PREP(H2C_HDR_CLASS, class) | 326 FIELD_PREP(H2C_HDR_FUNC, func) | 327 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 328 329 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 330 len + H2C_HEADER_LEN) | 331 (rack ? H2C_HDR_REC_ACK : 0) | 332 (dack ? 
H2C_HDR_DONE_ACK : 0)); 333 334 rtwdev->fw.h2c_seq++; 335 } 336 337 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 338 struct sk_buff *skb, 339 u8 type, u8 cat, u8 class, u8 func, 340 u32 len) 341 { 342 struct fwcmd_hdr *hdr; 343 344 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 345 346 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 347 FIELD_PREP(H2C_HDR_CAT, cat) | 348 FIELD_PREP(H2C_HDR_CLASS, class) | 349 FIELD_PREP(H2C_HDR_FUNC, func) | 350 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 351 352 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 353 len + H2C_HEADER_LEN)); 354 } 355 356 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 357 { 358 struct sk_buff *skb; 359 u32 ret = 0; 360 361 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 362 if (!skb) { 363 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 364 return -ENOMEM; 365 } 366 367 skb_put_data(skb, fw, len); 368 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); 369 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 370 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 371 H2C_FUNC_MAC_FWHDR_DL, len); 372 373 ret = rtw89_h2c_tx(rtwdev, skb, false); 374 if (ret) { 375 rtw89_err(rtwdev, "failed to send h2c\n"); 376 ret = -1; 377 goto fail; 378 } 379 380 return 0; 381 fail: 382 dev_kfree_skb_any(skb); 383 384 return ret; 385 } 386 387 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) 388 { 389 u8 val; 390 int ret; 391 392 ret = __rtw89_fw_download_hdr(rtwdev, fw, len); 393 if (ret) { 394 rtw89_err(rtwdev, "[ERR]FW header download\n"); 395 return ret; 396 } 397 398 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, 399 1, FWDL_WAIT_CNT, false, 400 rtwdev, R_AX_WCPU_FW_CTRL); 401 if (ret) { 402 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 403 return ret; 404 } 405 406 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 407 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 408 409 return 0; 410 } 411 412 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 413 struct rtw89_fw_hdr_section_info *info) 414 { 415 struct sk_buff *skb; 416 const u8 *section = info->addr; 417 u32 residue_len = info->len; 418 u32 pkt_len; 419 int ret; 420 421 while (residue_len) { 422 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 423 pkt_len = FWDL_SECTION_PER_PKT_LEN; 424 else 425 pkt_len = residue_len; 426 427 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 428 if (!skb) { 429 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 430 return -ENOMEM; 431 } 432 skb_put_data(skb, section, pkt_len); 433 434 ret = rtw89_h2c_tx(rtwdev, skb, true); 435 if (ret) { 436 rtw89_err(rtwdev, "failed to send h2c\n"); 437 ret = -1; 438 goto fail; 439 } 440 441 section += pkt_len; 442 residue_len -= pkt_len; 443 } 444 445 return 0; 446 fail: 447 dev_kfree_skb_any(skb); 448 449 return ret; 450 } 451 452 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, 453 struct rtw89_fw_bin_info *info) 454 { 455 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 456 u8 section_num = info->section_num; 457 int ret; 458 459 while (section_num--) { 460 ret = __rtw89_fw_download_main(rtwdev, section_info); 461 if (ret) 462 return ret; 463 section_info++; 464 } 465 466 mdelay(5); 467 468 ret = rtw89_fw_check_rdy(rtwdev); 469 if (ret) { 470 rtw89_warn(rtwdev, "download firmware fail\n"); 471 return ret; 472 } 473 474 return 0; 475 } 476 477 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 478 { 479 u32 
val32; 480 u16 index; 481 482 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 483 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 484 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 485 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 486 487 for (index = 0; index < 15; index++) { 488 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL); 489 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 490 fsleep(10); 491 } 492 } 493 494 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 495 { 496 u32 val32; 497 u16 val16; 498 499 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 500 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 501 502 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); 503 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); 504 505 rtw89_fw_prog_cnt_dump(rtwdev); 506 } 507 508 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) 509 { 510 struct rtw89_fw_info *fw_info = &rtwdev->fw; 511 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 512 struct rtw89_fw_bin_info info; 513 const u8 *fw = fw_suit->data; 514 u32 len = fw_suit->size; 515 u8 val; 516 int ret; 517 518 if (!fw || !len) { 519 rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); 520 return -ENOENT; 521 } 522 523 ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); 524 if (ret) { 525 rtw89_err(rtwdev, "parse fw header fail\n"); 526 goto fwdl_err; 527 } 528 529 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, 530 1, FWDL_WAIT_CNT, false, 531 rtwdev, R_AX_WCPU_FW_CTRL); 532 if (ret) { 533 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 534 goto fwdl_err; 535 } 536 537 ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len); 538 if (ret) { 539 ret = -EBUSY; 540 goto fwdl_err; 541 } 542 543 ret = rtw89_fw_download_main(rtwdev, fw, &info); 544 if (ret) { 545 ret = -EBUSY; 546 goto fwdl_err; 547 } 548 549 fw_info->h2c_seq = 0; 550 fw_info->rec_seq = 0; 551 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 552 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 553 554 return ret; 555 556 fwdl_err: 557 rtw89_fw_dl_fail_dump(rtwdev); 558 return ret; 559 } 560 561 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 562 { 563 struct rtw89_fw_info *fw = &rtwdev->fw; 564 565 wait_for_completion(&fw->completion); 566 if (!fw->firmware) 567 return -EINVAL; 568 569 return 0; 570 } 571 572 static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context) 573 { 574 struct rtw89_fw_info *fw = context; 575 struct rtw89_dev *rtwdev = fw->rtwdev; 576 577 if (!firmware || !firmware->data) { 578 rtw89_err(rtwdev, "failed to request firmware\n"); 579 complete_all(&fw->completion); 580 return; 581 } 582 583 fw->firmware = firmware; 584 complete_all(&fw->completion); 585 } 586 587 int rtw89_load_firmware(struct rtw89_dev *rtwdev) 588 { 589 struct rtw89_fw_info *fw = &rtwdev->fw; 590 const char *fw_name = rtwdev->chip->fw_name; 591 int ret; 592 593 fw->rtwdev = rtwdev; 594 init_completion(&fw->completion); 595 596 ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, 597 GFP_KERNEL, fw, rtw89_load_firmware_cb); 598 if (ret) { 599 rtw89_err(rtwdev, "failed to async firmware request\n"); 600 return ret; 601 } 602 603 return 0; 604 } 605 606 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 607 { 608 struct rtw89_fw_info *fw = &rtwdev->fw; 609 610 rtw89_wait_firmware_completion(rtwdev); 611 612 if (fw->firmware) 613 release_firmware(fw->firmware); 614 } 615 616 #define H2C_CAM_LEN 60 617 int rtw89_fw_h2c_cam(struct rtw89_dev 
*rtwdev, struct rtw89_vif *rtwvif, 618 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 619 { 620 struct sk_buff *skb; 621 int ret; 622 623 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 624 if (!skb) { 625 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 626 return -ENOMEM; 627 } 628 skb_put(skb, H2C_CAM_LEN); 629 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 630 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data); 631 632 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 633 H2C_CAT_MAC, 634 H2C_CL_MAC_ADDR_CAM_UPDATE, 635 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 636 H2C_CAM_LEN); 637 638 ret = rtw89_h2c_tx(rtwdev, skb, false); 639 if (ret) { 640 rtw89_err(rtwdev, "failed to send h2c\n"); 641 goto fail; 642 } 643 644 return 0; 645 fail: 646 dev_kfree_skb_any(skb); 647 648 return ret; 649 } 650 651 #define H2C_DCTL_SEC_CAM_LEN 68 652 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 653 struct rtw89_vif *rtwvif, 654 struct rtw89_sta *rtwsta) 655 { 656 struct sk_buff *skb; 657 int ret; 658 659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); 660 if (!skb) { 661 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 662 return -ENOMEM; 663 } 664 skb_put(skb, H2C_DCTL_SEC_CAM_LEN); 665 666 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); 667 668 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 669 H2C_CAT_MAC, 670 H2C_CL_MAC_FR_EXCHG, 671 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 672 H2C_DCTL_SEC_CAM_LEN); 673 674 ret = rtw89_h2c_tx(rtwdev, skb, false); 675 if (ret) { 676 rtw89_err(rtwdev, "failed to send h2c\n"); 677 goto fail; 678 } 679 680 return 0; 681 fail: 682 dev_kfree_skb_any(skb); 683 684 return ret; 685 } 686 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 687 688 #define H2C_BA_CAM_LEN 8 689 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 690 bool valid, struct ieee80211_ampdu_params *params) 691 { 692 const struct rtw89_chip_info *chip = rtwdev->chip; 693 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 694 u8 macid = rtwsta->mac_id; 695 struct sk_buff *skb; 696 u8 entry_idx; 697 int ret; 698 699 ret = valid ? 700 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 701 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 702 if (ret) { 703 /* it still works even if we don't have static BA CAM, because 704 * hardware can create dynamic BA CAM automatically. 705 */ 706 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 707 "failed to %s entry tid=%d for h2c ba cam\n", 708 valid ? 
"alloc" : "free", params->tid); 709 return 0; 710 } 711 712 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 713 if (!skb) { 714 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 715 return -ENOMEM; 716 } 717 skb_put(skb, H2C_BA_CAM_LEN); 718 SET_BA_CAM_MACID(skb->data, macid); 719 if (chip->bacam_v1) 720 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 721 else 722 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx); 723 if (!valid) 724 goto end; 725 SET_BA_CAM_VALID(skb->data, valid); 726 SET_BA_CAM_TID(skb->data, params->tid); 727 if (params->buf_size > 64) 728 SET_BA_CAM_BMAP_SIZE(skb->data, 4); 729 else 730 SET_BA_CAM_BMAP_SIZE(skb->data, 0); 731 /* If init req is set, hw will set the ssn */ 732 SET_BA_CAM_INIT_REQ(skb->data, 1); 733 SET_BA_CAM_SSN(skb->data, params->ssn); 734 735 if (chip->bacam_v1) { 736 SET_BA_CAM_STD_EN(skb->data, 1); 737 SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx); 738 } 739 740 end: 741 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 742 H2C_CAT_MAC, 743 H2C_CL_BA_CAM, 744 H2C_FUNC_MAC_BA_CAM, 0, 1, 745 H2C_BA_CAM_LEN); 746 747 ret = rtw89_h2c_tx(rtwdev, skb, false); 748 if (ret) { 749 rtw89_err(rtwdev, "failed to send h2c\n"); 750 goto fail; 751 } 752 753 return 0; 754 fail: 755 dev_kfree_skb_any(skb); 756 757 return ret; 758 } 759 760 static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev, 761 u8 entry_idx, u8 uid) 762 { 763 struct sk_buff *skb; 764 int ret; 765 766 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); 767 if (!skb) { 768 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 769 return -ENOMEM; 770 } 771 skb_put(skb, H2C_BA_CAM_LEN); 772 773 SET_BA_CAM_VALID(skb->data, 1); 774 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx); 775 SET_BA_CAM_UID(skb->data, uid); 776 SET_BA_CAM_BAND(skb->data, 0); 777 SET_BA_CAM_STD_EN(skb->data, 0); 778 779 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 780 H2C_CAT_MAC, 781 H2C_CL_BA_CAM, 782 H2C_FUNC_MAC_BA_CAM, 0, 1, 783 H2C_BA_CAM_LEN); 784 785 ret = rtw89_h2c_tx(rtwdev, skb, false); 786 if (ret) { 787 rtw89_err(rtwdev, "failed to send h2c\n"); 788 goto fail; 789 } 790 791 return 0; 792 fail: 793 dev_kfree_skb_any(skb); 794 795 return ret; 796 } 797 798 void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev) 799 { 800 const struct rtw89_chip_info *chip = rtwdev->chip; 801 u8 entry_idx = chip->bacam_num; 802 u8 uid = 0; 803 int i; 804 805 for (i = 0; i < chip->bacam_dynamic_num; i++) { 806 rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid); 807 entry_idx++; 808 uid++; 809 } 810 } 811 812 #define H2C_LOG_CFG_LEN 12 813 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 814 { 815 struct sk_buff *skb; 816 u32 comp = enable ? 
BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 817 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; 818 int ret; 819 820 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 821 if (!skb) { 822 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 823 return -ENOMEM; 824 } 825 826 skb_put(skb, H2C_LOG_CFG_LEN); 827 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); 828 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 829 SET_LOG_CFG_COMP(skb->data, comp); 830 SET_LOG_CFG_COMP_EXT(skb->data, 0); 831 832 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 833 H2C_CAT_MAC, 834 H2C_CL_FW_INFO, 835 H2C_FUNC_LOG_CFG, 0, 0, 836 H2C_LOG_CFG_LEN); 837 838 ret = rtw89_h2c_tx(rtwdev, skb, false); 839 if (ret) { 840 rtw89_err(rtwdev, "failed to send h2c\n"); 841 goto fail; 842 } 843 844 return 0; 845 fail: 846 dev_kfree_skb_any(skb); 847 848 return ret; 849 } 850 851 #define H2C_GENERAL_PKT_LEN 6 852 #define H2C_GENERAL_PKT_ID_UND 0xff 853 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid) 854 { 855 struct sk_buff *skb; 856 int ret; 857 858 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 859 if (!skb) { 860 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 861 return -ENOMEM; 862 } 863 skb_put(skb, H2C_GENERAL_PKT_LEN); 864 SET_GENERAL_PKT_MACID(skb->data, macid); 865 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 866 SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 867 SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 868 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 869 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 870 871 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 872 H2C_CAT_MAC, 873 H2C_CL_FW_INFO, 874 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 875 H2C_GENERAL_PKT_LEN); 876 877 ret = rtw89_h2c_tx(rtwdev, skb, false); 878 if (ret) { 879 rtw89_err(rtwdev, "failed to send h2c\n"); 880 goto fail; 881 } 882 883 return 0; 884 fail: 885 dev_kfree_skb_any(skb); 886 887 return ret; 888 } 889 890 #define H2C_LPS_PARM_LEN 8 891 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 892 struct rtw89_lps_parm *lps_param) 893 { 894 struct sk_buff *skb; 895 int ret; 896 897 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 898 if (!skb) { 899 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 900 return -ENOMEM; 901 } 902 skb_put(skb, H2C_LPS_PARM_LEN); 903 904 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 905 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 906 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 907 SET_LPS_PARM_RLBM(skb->data, 1); 908 SET_LPS_PARM_SMARTPS(skb->data, 1); 909 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 910 SET_LPS_PARM_VOUAPSD(skb->data, 0); 911 SET_LPS_PARM_VIUAPSD(skb->data, 0); 912 SET_LPS_PARM_BEUAPSD(skb->data, 0); 913 SET_LPS_PARM_BKUAPSD(skb->data, 0); 914 915 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 916 H2C_CAT_MAC, 917 H2C_CL_MAC_PS, 918 H2C_FUNC_MAC_LPS_PARM, 0, 1, 919 H2C_LPS_PARM_LEN); 920 921 ret = rtw89_h2c_tx(rtwdev, skb, false); 922 if (ret) { 923 rtw89_err(rtwdev, "failed to send h2c\n"); 924 goto fail; 925 } 926 927 return 0; 928 fail: 929 dev_kfree_skb_any(skb); 930 931 return ret; 932 } 933 934 #define H2C_P2P_ACT_LEN 20 935 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 936 struct ieee80211_p2p_noa_desc *desc, 937 u8 act, u8 noa_id) 938 { 939 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 940 bool p2p_type_gc = 
rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 941 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 942 struct sk_buff *skb; 943 u8 *cmd; 944 int ret; 945 946 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 947 if (!skb) { 948 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 949 return -ENOMEM; 950 } 951 skb_put(skb, H2C_P2P_ACT_LEN); 952 cmd = skb->data; 953 954 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 955 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 956 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 957 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 958 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 959 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 960 if (desc) { 961 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 962 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 963 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 964 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 965 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 966 } 967 968 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 969 H2C_CAT_MAC, H2C_CL_MAC_PS, 970 H2C_FUNC_P2P_ACT, 0, 0, 971 H2C_P2P_ACT_LEN); 972 973 ret = rtw89_h2c_tx(rtwdev, skb, false); 974 if (ret) { 975 rtw89_err(rtwdev, "failed to send h2c\n"); 976 goto fail; 977 } 978 979 return 0; 980 fail: 981 dev_kfree_skb_any(skb); 982 983 return ret; 984 } 985 986 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 987 struct sk_buff *skb) 988 { 989 struct rtw89_hal *hal = &rtwdev->hal; 990 u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 991 u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 992 993 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 994 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 995 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 996 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 997 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 998 } 999 1000 #define H2C_CMC_TBL_LEN 68 1001 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 1002 struct rtw89_vif *rtwvif) 1003 { 1004 const struct rtw89_chip_info *chip = rtwdev->chip; 1005 struct sk_buff *skb; 1006 u8 macid = rtwvif->mac_id; 1007 int ret; 1008 1009 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1010 if (!skb) { 1011 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1012 return -ENOMEM; 1013 } 1014 skb_put(skb, H2C_CMC_TBL_LEN); 1015 SET_CTRL_INFO_MACID(skb->data, macid); 1016 SET_CTRL_INFO_OPERATION(skb->data, 1); 1017 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1018 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 1019 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1020 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 1021 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 1022 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 1023 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 1024 } 1025 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 1026 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 1027 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1028 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1029 1030 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1031 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1032 chip->h2c_cctl_func_id, 0, 1, 1033 H2C_CMC_TBL_LEN); 1034 1035 ret = rtw89_h2c_tx(rtwdev, skb, false); 1036 if (ret) { 1037 rtw89_err(rtwdev, "failed to send h2c\n"); 1038 goto fail; 1039 } 1040 1041 return 0; 1042 fail: 1043 dev_kfree_skb_any(skb); 1044 1045 return ret; 1046 } 1047 1048 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 1049 struct ieee80211_sta *sta, u8 *pads) 1050 { 1051 bool ppe_th; 1052 u8 ppe16, ppe8; 1053 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 1054 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 
1055 u8 ru_bitmap; 1056 u8 n, idx, sh; 1057 u16 ppe; 1058 int i; 1059 1060 if (!sta->deflink.he_cap.has_he) 1061 return; 1062 1063 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 1064 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 1065 if (!ppe_th) { 1066 u8 pad; 1067 1068 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 1069 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 1070 1071 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 1072 pads[i] = pad; 1073 1074 return; 1075 } 1076 1077 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 1078 n = hweight8(ru_bitmap); 1079 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 1080 1081 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 1082 if (!(ru_bitmap & BIT(i))) { 1083 pads[i] = 1; 1084 continue; 1085 } 1086 1087 idx = n >> 3; 1088 sh = n & 7; 1089 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 1090 1091 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 1092 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1093 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1094 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1095 1096 if (ppe16 != 7 && ppe8 == 7) 1097 pads[i] = 2; 1098 else if (ppe8 != 7) 1099 pads[i] = 1; 1100 else 1101 pads[i] = 0; 1102 } 1103 } 1104 1105 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 1106 struct ieee80211_vif *vif, 1107 struct ieee80211_sta *sta) 1108 { 1109 const struct rtw89_chip_info *chip = rtwdev->chip; 1110 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1111 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1112 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1113 struct sk_buff *skb; 1114 u8 pads[RTW89_PPE_BW_NUM]; 1115 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1116 u16 lowest_rate; 1117 int ret; 1118 1119 memset(pads, 0, sizeof(pads)); 1120 if (sta) 1121 __get_sta_he_pkt_padding(rtwdev, sta, pads); 1122 1123 if (vif->p2p) 1124 lowest_rate = RTW89_HW_RATE_OFDM6; 1125 else if (chan->band_type == RTW89_BAND_2G) 1126 lowest_rate = RTW89_HW_RATE_CCK1; 1127 else 1128 lowest_rate = RTW89_HW_RATE_OFDM6; 1129 1130 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1131 if (!skb) { 1132 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1133 return -ENOMEM; 1134 } 1135 skb_put(skb, H2C_CMC_TBL_LEN); 1136 SET_CTRL_INFO_MACID(skb->data, mac_id); 1137 SET_CTRL_INFO_OPERATION(skb->data, 1); 1138 SET_CMC_TBL_DISRTSFB(skb->data, 1); 1139 SET_CMC_TBL_DISDATAFB(skb->data, 1); 1140 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 1141 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 1142 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 1143 if (vif->type == NL80211_IFTYPE_STATION) 1144 SET_CMC_TBL_ULDL(skb->data, 1); 1145 else 1146 SET_CMC_TBL_ULDL(skb->data, 0); 1147 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 1148 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 1149 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1150 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1151 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1152 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1153 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1154 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1155 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1156 
SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1157 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1158 } 1159 if (sta) 1160 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 1161 sta->deflink.he_cap.has_he); 1162 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1163 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1164 1165 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1166 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1167 chip->h2c_cctl_func_id, 0, 1, 1168 H2C_CMC_TBL_LEN); 1169 1170 ret = rtw89_h2c_tx(rtwdev, skb, false); 1171 if (ret) { 1172 rtw89_err(rtwdev, "failed to send h2c\n"); 1173 goto fail; 1174 } 1175 1176 return 0; 1177 fail: 1178 dev_kfree_skb_any(skb); 1179 1180 return ret; 1181 } 1182 1183 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1184 struct rtw89_sta *rtwsta) 1185 { 1186 const struct rtw89_chip_info *chip = rtwdev->chip; 1187 struct sk_buff *skb; 1188 int ret; 1189 1190 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1191 if (!skb) { 1192 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1193 return -ENOMEM; 1194 } 1195 skb_put(skb, H2C_CMC_TBL_LEN); 1196 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1197 SET_CTRL_INFO_OPERATION(skb->data, 1); 1198 if (rtwsta->cctl_tx_time) { 1199 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1200 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1201 } 1202 if (rtwsta->cctl_tx_retry_limit) { 1203 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1204 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1205 } 1206 1207 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1208 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1209 chip->h2c_cctl_func_id, 0, 1, 1210 H2C_CMC_TBL_LEN); 1211 1212 ret = rtw89_h2c_tx(rtwdev, skb, false); 1213 if (ret) { 1214 rtw89_err(rtwdev, "failed to send h2c\n"); 1215 goto fail; 1216 } 1217 1218 return 0; 1219 fail: 1220 dev_kfree_skb_any(skb); 1221 1222 return ret; 1223 } 1224 1225 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1226 struct rtw89_sta *rtwsta) 1227 { 1228 const struct rtw89_chip_info *chip = rtwdev->chip; 1229 struct sk_buff *skb; 1230 int ret; 1231 1232 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 1233 return 0; 1234 1235 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1236 if (!skb) { 1237 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1238 return -ENOMEM; 1239 } 1240 skb_put(skb, H2C_CMC_TBL_LEN); 1241 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1242 SET_CTRL_INFO_OPERATION(skb->data, 1); 1243 1244 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1245 1246 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1247 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1248 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 1249 H2C_CMC_TBL_LEN); 1250 1251 ret = rtw89_h2c_tx(rtwdev, skb, false); 1252 if (ret) { 1253 rtw89_err(rtwdev, "failed to send h2c\n"); 1254 goto fail; 1255 } 1256 1257 return 0; 1258 fail: 1259 dev_kfree_skb_any(skb); 1260 1261 return ret; 1262 } 1263 1264 #define H2C_BCN_BASE_LEN 12 1265 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 1266 struct rtw89_vif *rtwvif) 1267 { 1268 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 1269 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1270 struct sk_buff *skb; 1271 struct sk_buff *skb_beacon; 1272 u16 tim_offset; 1273 int bcn_total_len; 1274 u16 beacon_rate; 1275 int ret; 1276 1277 if (vif->p2p) 1278 beacon_rate = RTW89_HW_RATE_OFDM6; 1279 else if (chan->band_type == RTW89_BAND_2G) 1280 beacon_rate = 
RTW89_HW_RATE_CCK1; 1281 else 1282 beacon_rate = RTW89_HW_RATE_OFDM6; 1283 1284 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 1285 NULL, 0); 1286 if (!skb_beacon) { 1287 rtw89_err(rtwdev, "failed to get beacon skb\n"); 1288 return -ENOMEM; 1289 } 1290 1291 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 1292 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 1293 if (!skb) { 1294 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1295 dev_kfree_skb_any(skb_beacon); 1296 return -ENOMEM; 1297 } 1298 skb_put(skb, H2C_BCN_BASE_LEN); 1299 1300 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 1301 SET_BCN_UPD_MBSSID(skb->data, 0); 1302 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 1303 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 1304 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 1305 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 1306 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 1307 SET_BCN_UPD_RATE(skb->data, beacon_rate); 1308 1309 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 1310 dev_kfree_skb_any(skb_beacon); 1311 1312 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1313 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1314 H2C_FUNC_MAC_BCN_UPD, 0, 1, 1315 bcn_total_len); 1316 1317 ret = rtw89_h2c_tx(rtwdev, skb, false); 1318 if (ret) { 1319 rtw89_err(rtwdev, "failed to send h2c\n"); 1320 dev_kfree_skb_any(skb); 1321 return ret; 1322 } 1323 1324 return 0; 1325 } 1326 1327 #define H2C_ROLE_MAINTAIN_LEN 4 1328 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 1329 struct rtw89_vif *rtwvif, 1330 struct rtw89_sta *rtwsta, 1331 enum rtw89_upd_mode upd_mode) 1332 { 1333 struct sk_buff *skb; 1334 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1335 u8 self_role; 1336 int ret; 1337 1338 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 1339 if (rtwsta) 1340 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1341 else 1342 self_role = rtwvif->self_role; 1343 } else { 1344 self_role = rtwvif->self_role; 1345 } 1346 1347 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 1348 if (!skb) { 1349 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1350 return -ENOMEM; 1351 } 1352 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 1353 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 1354 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 1355 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 1356 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1357 1358 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1359 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1360 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 1361 H2C_ROLE_MAINTAIN_LEN); 1362 1363 ret = rtw89_h2c_tx(rtwdev, skb, false); 1364 if (ret) { 1365 rtw89_err(rtwdev, "failed to send h2c\n"); 1366 goto fail; 1367 } 1368 1369 return 0; 1370 fail: 1371 dev_kfree_skb_any(skb); 1372 1373 return ret; 1374 } 1375 1376 #define H2C_JOIN_INFO_LEN 4 1377 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1378 struct rtw89_sta *rtwsta, bool dis_conn) 1379 { 1380 struct sk_buff *skb; 1381 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1382 u8 self_role = rtwvif->self_role; 1383 u8 net_type = rtwvif->net_type; 1384 int ret; 1385 1386 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 1387 self_role = RTW89_SELF_ROLE_AP_CLIENT; 1388 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 1389 } 1390 1391 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1392 if (!skb) { 1393 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1394 return -ENOMEM; 1395 } 1396 skb_put(skb, H2C_JOIN_INFO_LEN); 1397 SET_JOININFO_MACID(skb->data, mac_id); 1398 SET_JOININFO_OP(skb->data, dis_conn); 1399 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); 1400 SET_JOININFO_WMM(skb->data, rtwvif->wmm); 1401 SET_JOININFO_TGR(skb->data, rtwvif->trigger); 1402 SET_JOININFO_ISHESTA(skb->data, 0); 1403 SET_JOININFO_DLBW(skb->data, 0); 1404 SET_JOININFO_TF_MAC_PAD(skb->data, 0); 1405 SET_JOININFO_DL_T_PE(skb->data, 0); 1406 SET_JOININFO_PORT_ID(skb->data, rtwvif->port); 1407 SET_JOININFO_NET_TYPE(skb->data, net_type); 1408 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 1409 SET_JOININFO_SELF_ROLE(skb->data, self_role); 1410 1411 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1412 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 1413 H2C_FUNC_MAC_JOININFO, 0, 1, 1414 H2C_JOIN_INFO_LEN); 1415 1416 ret = rtw89_h2c_tx(rtwdev, skb, false); 1417 if (ret) { 1418 rtw89_err(rtwdev, "failed to send h2c\n"); 1419 goto fail; 1420 } 1421 1422 return 0; 1423 fail: 1424 dev_kfree_skb_any(skb); 1425 1426 return ret; 1427 } 1428 1429 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 1430 bool pause) 1431 { 1432 struct rtw89_fw_macid_pause_grp h2c = {{0}}; 1433 u8 len = sizeof(struct rtw89_fw_macid_pause_grp); 1434 struct sk_buff *skb; 1435 int ret; 1436 1437 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 1438 if (!skb) { 1439 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1440 return -ENOMEM; 1441 } 1442 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); 1443 if (pause) 1444 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); 1445 skb_put_data(skb, &h2c, len); 1446 1447 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1448 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1449 H2C_FUNC_MAC_MACID_PAUSE, 1, 0, 1450 len); 1451 1452 ret = rtw89_h2c_tx(rtwdev, skb, false); 1453 if (ret) { 1454 rtw89_err(rtwdev, "failed to send h2c\n"); 1455 goto fail; 1456 } 1457 1458 return 0; 1459 fail: 1460 dev_kfree_skb_any(skb); 1461 1462 return ret; 1463 } 1464 1465 #define H2C_EDCA_LEN 12 1466 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1467 u8 ac, u32 val) 1468 { 1469 struct sk_buff *skb; 1470 int ret; 1471 1472 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 1473 if (!skb) { 1474 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 1475 return -ENOMEM; 1476 } 1477 skb_put(skb, H2C_EDCA_LEN); 1478 RTW89_SET_EDCA_SEL(skb->data, 0); 1479 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 1480 RTW89_SET_EDCA_WMM(skb->data, 0); 1481 RTW89_SET_EDCA_AC(skb->data, ac); 1482 RTW89_SET_EDCA_PARAM(skb->data, val); 1483 1484 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1485 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1486 H2C_FUNC_USR_EDCA, 0, 1, 1487 H2C_EDCA_LEN); 1488 1489 ret = rtw89_h2c_tx(rtwdev, skb, false); 1490 if (ret) { 1491 rtw89_err(rtwdev, "failed to send h2c\n"); 1492 goto fail; 1493 } 1494 1495 return 0; 1496 fail: 1497 dev_kfree_skb_any(skb); 1498 1499 return ret; 1500 } 1501 1502 #define H2C_TSF32_TOGL_LEN 4 1503 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1504 bool en) 1505 { 1506 struct sk_buff *skb; 1507 u16 early_us = en ? 
2000 : 0; 1508 u8 *cmd; 1509 int ret; 1510 1511 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 1512 if (!skb) { 1513 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 1514 return -ENOMEM; 1515 } 1516 skb_put(skb, H2C_TSF32_TOGL_LEN); 1517 cmd = skb->data; 1518 1519 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 1520 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 1521 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 1522 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 1523 1524 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1525 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1526 H2C_FUNC_TSF32_TOGL, 0, 0, 1527 H2C_TSF32_TOGL_LEN); 1528 1529 ret = rtw89_h2c_tx(rtwdev, skb, false); 1530 if (ret) { 1531 rtw89_err(rtwdev, "failed to send h2c\n"); 1532 goto fail; 1533 } 1534 1535 return 0; 1536 fail: 1537 dev_kfree_skb_any(skb); 1538 1539 return ret; 1540 } 1541 1542 #define H2C_OFLD_CFG_LEN 8 1543 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 1544 { 1545 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 1546 struct sk_buff *skb; 1547 int ret; 1548 1549 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 1550 if (!skb) { 1551 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 1552 return -ENOMEM; 1553 } 1554 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 1555 1556 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1557 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1558 H2C_FUNC_OFLD_CFG, 0, 1, 1559 H2C_OFLD_CFG_LEN); 1560 1561 ret = rtw89_h2c_tx(rtwdev, skb, false); 1562 if (ret) { 1563 rtw89_err(rtwdev, "failed to send h2c\n"); 1564 goto fail; 1565 } 1566 1567 return 0; 1568 fail: 1569 dev_kfree_skb_any(skb); 1570 1571 return ret; 1572 } 1573 1574 #define H2C_RA_LEN 16 1575 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 1576 { 1577 struct sk_buff *skb; 1578 u8 *cmd; 1579 int ret; 1580 1581 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN); 1582 if (!skb) { 1583 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 1584 return -ENOMEM; 1585 } 1586 skb_put(skb, H2C_RA_LEN); 1587 cmd = skb->data; 1588 rtw89_debug(rtwdev, RTW89_DBG_RA, 1589 "ra cmd msk: %llx ", ra->ra_mask); 1590 1591 RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); 1592 RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); 1593 RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); 1594 RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); 1595 RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); 1596 RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); 1597 RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); 1598 RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); 1599 RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap); 1600 RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); 1601 RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); 1602 RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); 1603 RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); 1604 RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); 1605 RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); 1606 RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); 1607 RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); 1608 RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); 1609 RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); 1610 RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en); 1611 RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf); 1612 1613 if (csi) { 1614 RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); 1615 RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); 1616 
RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); 1617 RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); 1618 RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); 1619 RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); 1620 RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); 1621 RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); 1622 RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); 1623 } 1624 1625 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1626 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 1627 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 1628 H2C_RA_LEN); 1629 1630 ret = rtw89_h2c_tx(rtwdev, skb, false); 1631 if (ret) { 1632 rtw89_err(rtwdev, "failed to send h2c\n"); 1633 goto fail; 1634 } 1635 1636 return 0; 1637 fail: 1638 dev_kfree_skb_any(skb); 1639 1640 return ret; 1641 } 1642 1643 #define H2C_LEN_CXDRVHDR 2 1644 #define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR) 1645 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 1646 { 1647 struct rtw89_btc *btc = &rtwdev->btc; 1648 struct rtw89_btc_dm *dm = &btc->dm; 1649 struct rtw89_btc_init_info *init_info = &dm->init_info; 1650 struct rtw89_btc_module *module = &init_info->module; 1651 struct rtw89_btc_ant_info *ant = &module->ant; 1652 struct sk_buff *skb; 1653 u8 *cmd; 1654 int ret; 1655 1656 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT); 1657 if (!skb) { 1658 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 1659 return -ENOMEM; 1660 } 1661 skb_put(skb, H2C_LEN_CXDRVINFO_INIT); 1662 cmd = skb->data; 1663 1664 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT); 1665 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR); 1666 1667 RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type); 1668 RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num); 1669 RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation); 1670 RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos); 1671 RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity); 1672 1673 RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type); 1674 RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv); 1675 RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo); 1676 RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos); 1677 RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type); 1678 1679 RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch); 1680 RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only); 1681 RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok); 1682 RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en); 1683 RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other); 1684 RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only); 1685 1686 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1687 H2C_CAT_OUTSRC, BTFC_SET, 1688 SET_DRV_INFO, 0, 0, 1689 H2C_LEN_CXDRVINFO_INIT); 1690 1691 ret = rtw89_h2c_tx(rtwdev, skb, false); 1692 if (ret) { 1693 rtw89_err(rtwdev, "failed to send h2c\n"); 1694 goto fail; 1695 } 1696 1697 return 0; 1698 fail: 1699 dev_kfree_skb_any(skb); 1700 1701 return ret; 1702 } 1703 1704 #define PORT_DATA_OFFSET 4 1705 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 1706 #define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR) 1707 #define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \ 1708 H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \ 1709 H2C_LEN_CXDRVHDR) 1710 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 1711 { 1712 struct rtw89_btc *btc = &rtwdev->btc; 1713 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 1714 struct rtw89_btc_wl_role_info 
*role_info = &wl->role_info; 1715 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 1716 struct rtw89_btc_wl_active_role *active = role_info->active_role; 1717 struct sk_buff *skb; 1718 u8 offset = 0; 1719 u8 *cmd; 1720 int ret; 1721 int i; 1722 1723 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE); 1724 if (!skb) { 1725 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 1726 return -ENOMEM; 1727 } 1728 skb_put(skb, H2C_LEN_CXDRVINFO_ROLE); 1729 cmd = skb->data; 1730 1731 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 1732 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR); 1733 1734 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 1735 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 1736 1737 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 1738 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 1739 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 1740 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 1741 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 1742 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 1743 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 1744 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 1745 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 1746 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 1747 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 1748 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 1749 1750 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 1751 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 1752 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 1753 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 1754 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 1755 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 1756 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 1757 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 1758 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 1759 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 1760 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 1761 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 1762 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 1763 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 1764 } 1765 1766 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1767 H2C_CAT_OUTSRC, BTFC_SET, 1768 SET_DRV_INFO, 0, 0, 1769 H2C_LEN_CXDRVINFO_ROLE); 1770 1771 ret = rtw89_h2c_tx(rtwdev, skb, false); 1772 if (ret) { 1773 rtw89_err(rtwdev, "failed to send h2c\n"); 1774 goto fail; 1775 } 1776 1777 return 0; 1778 fail: 1779 dev_kfree_skb_any(skb); 1780 1781 return ret; 1782 } 1783 1784 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 1785 { 1786 struct rtw89_btc *btc = &rtwdev->btc; 1787 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 1788 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 1789 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 1790 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 1791 struct sk_buff *skb; 1792 u8 *cmd, offset; 1793 int ret; 1794 int i; 1795 1796 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1); 1797 if (!skb) { 1798 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 1799 return -ENOMEM; 1800 } 1801 skb_put(skb, 
H2C_LEN_CXDRVINFO_ROLE_V1); 1802 cmd = skb->data; 1803 1804 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 1805 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR); 1806 1807 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 1808 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 1809 1810 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 1811 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 1812 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 1813 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 1814 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 1815 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 1816 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 1817 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 1818 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 1819 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 1820 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 1821 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 1822 1823 offset = PORT_DATA_OFFSET; 1824 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 1825 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 1826 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 1827 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 1828 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 1829 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 1830 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 1831 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 1832 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 1833 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 1834 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 1835 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 1836 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 1837 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 1838 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 1839 } 1840 1841 offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 1842 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 1843 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 1844 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 1845 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 1846 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 1847 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 1848 1849 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1850 H2C_CAT_OUTSRC, BTFC_SET, 1851 SET_DRV_INFO, 0, 0, 1852 H2C_LEN_CXDRVINFO_ROLE_V1); 1853 1854 ret = rtw89_h2c_tx(rtwdev, skb, false); 1855 if (ret) { 1856 rtw89_err(rtwdev, "failed to send h2c\n"); 1857 goto fail; 1858 } 1859 1860 return 0; 1861 fail: 1862 dev_kfree_skb_any(skb); 1863 1864 return ret; 1865 } 1866 1867 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 1868 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 1869 { 1870 const struct rtw89_chip_info *chip = rtwdev->chip; 1871 struct rtw89_btc *btc = &rtwdev->btc; 1872 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 1873 struct sk_buff *skb; 1874 u8 *cmd; 1875 int ret; 1876 1877 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 1878 if (!skb) { 1879 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 1880 
return -ENOMEM; 1881 } 1882 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 1883 cmd = skb->data; 1884 1885 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 1886 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 1887 1888 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 1889 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 1890 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 1891 if (chip->chip_id == RTL8852A) 1892 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 1893 1894 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1895 H2C_CAT_OUTSRC, BTFC_SET, 1896 SET_DRV_INFO, 0, 0, 1897 H2C_LEN_CXDRVINFO_CTRL); 1898 1899 ret = rtw89_h2c_tx(rtwdev, skb, false); 1900 if (ret) { 1901 rtw89_err(rtwdev, "failed to send h2c\n"); 1902 goto fail; 1903 } 1904 1905 return 0; 1906 fail: 1907 dev_kfree_skb_any(skb); 1908 1909 return ret; 1910 } 1911 1912 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 1913 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) 1914 { 1915 struct rtw89_btc *btc = &rtwdev->btc; 1916 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 1917 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 1918 struct sk_buff *skb; 1919 u8 *cmd; 1920 int ret; 1921 1922 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 1923 if (!skb) { 1924 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 1925 return -ENOMEM; 1926 } 1927 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 1928 cmd = skb->data; 1929 1930 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); 1931 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 1932 1933 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 1934 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 1935 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 1936 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 1937 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 1938 1939 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1940 H2C_CAT_OUTSRC, BTFC_SET, 1941 SET_DRV_INFO, 0, 0, 1942 H2C_LEN_CXDRVINFO_RFK); 1943 1944 ret = rtw89_h2c_tx(rtwdev, skb, false); 1945 if (ret) { 1946 rtw89_err(rtwdev, "failed to send h2c\n"); 1947 goto fail; 1948 } 1949 1950 return 0; 1951 fail: 1952 dev_kfree_skb_any(skb); 1953 1954 return ret; 1955 } 1956 1957 #define H2C_LEN_PKT_OFLD 4 1958 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 1959 { 1960 struct sk_buff *skb; 1961 u8 *cmd; 1962 int ret; 1963 1964 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 1965 if (!skb) { 1966 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 1967 return -ENOMEM; 1968 } 1969 skb_put(skb, H2C_LEN_PKT_OFLD); 1970 cmd = skb->data; 1971 1972 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 1973 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 1974 1975 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1976 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 1977 H2C_FUNC_PACKET_OFLD, 1, 1, 1978 H2C_LEN_PKT_OFLD); 1979 1980 ret = rtw89_h2c_tx(rtwdev, skb, false); 1981 if (ret) { 1982 rtw89_err(rtwdev, "failed to send h2c\n"); 1983 goto fail; 1984 } 1985 1986 return 0; 1987 fail: 1988 dev_kfree_skb_any(skb); 1989 1990 return ret; 1991 } 1992 1993 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 1994 struct sk_buff *skb_ofld) 1995 { 1996 struct sk_buff *skb; 1997 u8 *cmd; 1998 u8 alloc_id; 1999 int ret; 2000 2001 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2002 RTW89_MAX_PKT_OFLD_NUM); 2003 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2004 return 
	if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
		return -ENOSPC;

	*id = alloc_id;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_PKT_OFLD);
	cmd = skb->data;

	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
	skb_put_data(skb, skb_ofld->data, skb_ofld->len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PACKET_OFLD, 1, 1,
			      H2C_LEN_PKT_OFLD + skb_ofld->len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_SCAN_LIST_OFFLOAD 4
int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
				   struct list_head *chan_list)
{
	struct rtw89_mac_chinfo *ch_info;
	struct sk_buff *skb;
	int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
	cmd = skb->data;

	RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
	/* in unit of 4 bytes */
	RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);

	list_for_each_entry(ch_info, chan_list, list) {
		cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);

		RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
		RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
		RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
		RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
		RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
		RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
		RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
		RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
		RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
		RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
		RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
		RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
		RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
		RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
		RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
		RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
		RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
		RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
		RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
		RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
		RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
		RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

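/* SCANOFLD: start or stop the firmware scan engine for this vif. When
 * target_ch_mode is set, the stored operating channel is passed along so
 * firmware knows where to return during the scan.
 */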
#define H2C_LEN_SCAN_OFFLOAD 28
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
			      struct rtw89_scan_option *option,
			      struct rtw89_vif *rtwvif)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
	cmd = skb->data;

	RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
	RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
	RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
	RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
	RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
	RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
	RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
	if (option->target_ch_mode) {
		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
		RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
						       scan_info->op_pri_ch);
		RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
							   scan_info->op_chan);
		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd,
							scan_info->op_band);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_SCANOFLD, 1, 1,
			      H2C_LEN_SCAN_OFFLOAD);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

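/* Download one page of RF register configuration to firmware; the H2C class
 * encodes which RF path (A or B) the page targets.
 */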
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
			struct rtw89_fw_h2c_rf_reg_info *info,
			u16 len, u8 page)
{
	struct sk_buff *skb;
	u8 class = info->rf_path == RF_PATH_A ?
		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
		return -ENOMEM;
	}
	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, class, page, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
	struct rtw89_fw_h2c_rf_get_mccch *mccch;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
		return -ENOMEM;
	}
	skb_put(skb, sizeof(*mccch));
	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;

	mccch->ch_0 = cpu_to_le32(mcc_info->ch[0]);
	mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
	mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
	mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
	mccch->current_channel = cpu_to_le32(chan->channel);
	mccch->current_band_type = cpu_to_le32(chan->band_type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
			      sizeof(*mccch));

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);

int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
			      bool rack, bool dack)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c;

	lockdep_assert_held(&rtwdev->mutex);

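	/* flush every H2C command queued on the early list */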
	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
	}
}

void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c, *tmp;

	mutex_lock(&rtwdev->mutex);
	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
		list_del(&early_h2c->list);
		kfree(early_h2c->h2c);
		kfree(early_h2c);
	}
	mutex_unlock(&rtwdev->mutex);
}

void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
{
	skb_queue_tail(&rtwdev->c2h_queue, c2h);
	ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
}

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb)
{
	u8 category = RTW89_GET_C2H_CATEGORY(skb->data);
	u8 class = RTW89_GET_C2H_CLASS(skb->data);
	u8 func = RTW89_GET_C2H_FUNC(skb->data);
	u16 len = RTW89_GET_C2H_LEN(skb->data);
	bool dump = true;

	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	switch (category) {
	case RTW89_C2H_CAT_TEST:
		break;
	case RTW89_C2H_CAT_MAC:
		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
		if (class == RTW89_MAC_C2H_CLASS_INFO &&
		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
			dump = false;
		break;
	case RTW89_C2H_CAT_OUTSRC:
		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
		else
			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
		break;
	}

	if (dump)
		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}

void rtw89_fw_c2h_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						c2h_work);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
		skb_unlink(skb, &rtwdev->c2h_queue);
		mutex_lock(&rtwdev->mutex);
		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
		mutex_unlock(&rtwdev->mutex);
		dev_kfree_skb_any(skb);
	}
}

static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
				  struct rtw89_mac_h2c_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *h2c_reg = chip->h2c_regs;
	u8 i, val, len;
	int ret;

	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
				rtwdev, chip->h2c_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
		return ret;
	}

	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
			   sizeof(info->h2creg[0]));

	RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id);
	RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len);
	for (i = 0; i < RTW89_H2CREG_MAX; i++)
		rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);

	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);

	return 0;
}

static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
				 struct rtw89_mac_c2h_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *c2h_reg = chip->c2h_regs;
	u32 ret;
	u8 i, val;

	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;

	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
				       RTW89_C2H_TIMEOUT, false, rtwdev,
				       chip->c2h_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "c2h reg timeout\n");
		return ret;
	}

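	/* latch all C2H registers, then clear the control register to ack */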
	for (i = 0; i < RTW89_C2HREG_MAX; i++)
		info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);

	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);

	info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
	info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
				RTW89_C2HREG_HDR_LEN;

	return 0;
}

int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
		     struct rtw89_mac_h2c_info *h2c_info,
		     struct rtw89_mac_c2h_info *c2h_info)
{
	u32 ret;

	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
		lockdep_assert_held(&rtwdev->mutex);

	if (!h2c_info && !c2h_info)
		return -EINVAL;

	if (!h2c_info)
		goto recv_c2h;

	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
	if (ret)
		return ret;

recv_c2h:
	if (!c2h_info)
		return 0;

	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
	if (ret)
		return ret;

	return 0;
}

void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
{
	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
		rtw89_err(rtwdev, "[ERR]pwr is off\n");
		return;
	}

	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
{
	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
	struct rtw89_pktofld_info *info, *tmp;
	u8 idx;

	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
		if (!(rtwdev->chip->support_bands & BIT(idx)))
			continue;

		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
			rtw89_core_release_bit_map(rtwdev->pkt_offload,
						   info->id);
			list_del(&info->list);
			kfree(info);
		}
	}
}

static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
				     struct rtw89_vif *rtwvif,
				     struct sk_buff *skb)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct rtw89_pktofld_info *info;
	struct sk_buff *new;
	int ret = 0;
	u8 band;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!(rtwdev->chip->support_bands & BIT(band)))
			continue;

		new = skb_copy(skb, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out;
		}
		skb_put_data(new, ies->ies[band], ies->len[band]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(new);
			goto out;
		}

		list_add_tail(&info->list, &scan_info->pkt_list[band]);
		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
		if (ret)
			goto out;

		kfree_skb(new);
	}
out:
	return ret;
}

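/* Build one probe request template per requested SSID, append the per-band
 * and common scan IEs, and register each result with firmware as a packet
 * offload entry.
 */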
static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
					  struct rtw89_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct sk_buff *skb;
	u8 num = req->n_ssids, i;
	int ret;

	for (i = 0; i < num; i++) {
		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     req->ssids[i].ssid,
					     req->ssids[i].ssid_len,
					     req->ie_len);
		if (!skb)
			return -ENOMEM;

		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
		kfree_skb(skb);

		if (ret)
			return ret;
	}

	return 0;
}

static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
				   int ssid_num,
				   struct rtw89_mac_chinfo *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_pktofld_info *info;
	u8 band, probe_count = 0;

	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ssid_num) {
		ch_info->num_pkt = ssid_num;
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			ch_info->pkt_id[probe_count] = info->id;
			if (++probe_count >= ssid_num)
				break;
		}
		if (probe_count != ssid_num)
			rtw89_err(rtwdev, "SSID num differs from list len\n");
	}

	if (ch_info->ch_band == RTW89_BAND_6G) {
		if (ssid_num == 1 && req->ssids[0].ssid_len == 0) {
			ch_info->tx_pkt = false;
			if (!req->duration_mandatory)
				ch_info->period -= RTW89_DWELL_TIME;
		}
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		ch_info->central_ch = scan_info->op_chan;
		ch_info->pri_ch = scan_info->op_pri_ch;
		ch_info->ch_band = scan_info->op_band;
		ch_info->bw = scan_info->op_bw;
		ch_info->tx_null = true;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period = max_t(u8, ch_info->period,
						RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case RTW89_CHAN_ACTIVE:
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bound\n");
	}
}

static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
				       struct rtw89_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	int list_len, off_chan_time = 0;
	enum rtw89_chan_type type;
	int ret = 0;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);
	for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
	     idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
	     idx++, list_len++) {
		channel = req->channels[idx];
		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		if (req->duration_mandatory)
			ch_info->period = req->duration;
		else if (channel->band == NL80211_BAND_6GHZ)
			ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME;
		else
			ch_info->period = RTW89_CHANNEL_TIME;

		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;

		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);

		if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				ret = -ENOMEM;
				kfree(ch_info);
				goto out;
			}

			type = RTW89_CHAN_OPERATE;
			tmp->period = req->duration_mandatory ?
				      req->duration : RTW89_CHANNEL_TIME;
			rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
			list_add_tail(&tmp->list, &chan_list);
			off_chan_time = 0;
			list_len++;
		}
		list_add_tail(&ch_info->list, &chan_list);
		off_chan_time += ch_info->period;
	}
	rtwdev->scan_info.last_chan_idx = idx;
	ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
				   struct rtw89_vif *rtwvif)
{
	int ret;

	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
	if (ret) {
		rtw89_err(rtwdev, "Update probe request failed\n");
		goto out;
	}
	ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
out:
	return ret;
}

void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_scan_request *scan_req)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	u32 rx_fltr = rtwdev->hal.rx_fltr;
	u8 mac_addr[ETH_ALEN];

	rtwdev->scan_info.scanning_vif = vif;
	rtwdev->scan_info.last_chan_idx = 0;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;
	ieee80211_stop_queues(rtwdev->hw);

	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, vif->addr);
	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);

	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rx_fltr &= ~B_AX_A_BC;
	rx_fltr &= ~B_AX_A_A1_MATCH;
	rtw89_write32_mask(rtwdev,
			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
			   B_AX_RX_FLTR_CFG_MASK,
			   rx_fltr);
}

void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			    bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};
	struct rtw89_vif *rtwvif;

	if (!vif)
		return;

	rtw89_write32_mask(rtwdev,
			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
			   B_AX_RX_FLTR_CFG_MASK,
			   rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, vif, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);

	rtw89_release_pkt_list(rtwdev);
	rtwvif = (struct rtw89_vif *)vif->drv_priv;
	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	rtwdev->scan_info.last_chan_idx = 0;
	rtwdev->scan_info.scanning_vif = NULL;

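	/* if the vif has an active link, restore the saved operating channel */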
	if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
		rtw89_store_op_chan(rtwdev, false);
	rtw89_set_channel(rtwdev);
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
	rtw89_hw_scan_offload(rtwdev, vif, false);
	rtw89_hw_scan_complete(rtwdev, vif, true);
}

int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			  bool enable)
{
	struct rtw89_scan_option opt = {0};
	struct rtw89_vif *rtwvif;
	int ret = 0;

	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
	if (!rtwvif)
		return -EINVAL;

	opt.enable = enable;
	opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
	if (enable) {
		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
		if (ret)
			goto out;
	}
	ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
out:
	return ret;
}

void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_chan new;

	if (backup) {
		scan_info->op_pri_ch = cur->primary_channel;
		scan_info->op_chan = cur->channel;
		scan_info->op_bw = cur->band_width;
		scan_info->op_band = cur->band_type;
	} else {
		rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch,
				  scan_info->op_band, scan_info->op_bw);
		rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
	}
}

#define H2C_FW_CPU_EXCEPTION_LEN 4
#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for fw cpu exception\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
	RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
					   H2C_FW_CPU_EXCEPTION_TYPE_DEF);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_TEST,
			      H2C_CL_FW_STATUS_TEST,
			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
			      H2C_FW_CPU_EXCEPTION_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_PKT_DROP_LEN 24
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
			  const struct rtw89_pkt_drop_params *params)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for packet drop\n");
		return -ENOMEM;
	}

	switch (params->sel) {
	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "H2C of pkt drop might not fully support sel: %d yet\n",
			    params->sel);
		break;
	}

	skb_put(skb, H2C_PKT_DROP_LEN);
	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PKT_DROP, 0, 0,
			      H2C_PKT_DROP_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);
	return ret;
}