// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "util.h"

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
			       struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->cv != rtwdev->hal.cv ||
		    mfw_info->type != type ||
		    mfw_info->mp)
			continue;

		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
		fw_suit->size = le32_to_cpu(mfw_info->size);
		return 0;
	}

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;
}

static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
				enum rtw89_fw_type type,
				struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *hdr = (const struct rtw89_fw_hdr *)fw_suit->data;

	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	rtw89_fw_update_ver(rtwdev, type, fw_suit);

	return 0;
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
};

static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	union rtw89_compat_fw_hdr buf = {};
	const struct firmware *firmware;
	bool full_req = false;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	/* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will
	 * be denied (-EPERM), so we wouldn't get the firmware header we need.
	 * In that case, request the full firmware here instead.
	 */
	if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE))
		full_req = true;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		if (full_req)
			ret = request_firmware(&firmware, fw_name, device);
		else
			ret = request_partial_firmware_into_buf(&firmware, fw_name,
								device, &buf, sizeof(buf),
								0);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	if (full_req)
		ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
	else
		ver_code = rtw89_compat_fw_hdr_ver_code(&buf);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	if (full_req)
		return firmware;

	release_firmware(firmware);
	return NULL;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works even if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	u8 val;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return 0;
}
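
/* Firmware download sequence (see rtw89_fw_download() below): the MAC CPU is
 * halted and re-enabled, the firmware header is pushed with
 * SET_FW_HDR_PART_SIZE set to FWDL_SECTION_PER_PKT_LEN, and each section is
 * then streamed in packets of at most FWDL_SECTION_PER_PKT_LEN bytes before
 * rtw89_fw_check_rdy() polls for RTW89_FWDL_WCPU_FW_INIT_RDY. On failure the
 * helpers below dump the WCPU program counter and FWDL status registers.
 */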

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	struct rtw89_fw_bin_info info;
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;
	u8 val;
	int ret;

	rtw89_mac_disable_cpu(rtwdev);
	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
	if (ret)
		return ret;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
		return -ENOENT;
	}

	ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		goto fwdl_err;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		goto fwdl_err;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	ret = rtw89_fw_download_main(rtwdev, fw, &info);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}

void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}
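
/* Firmware request flow: rtw89_early_fw_feature_recognize() may already have
 * requested the full firmware image (e.g. when SECURITY_LOADPIN_ENFORCE
 * forbids partial reads); in that case rtw89_load_firmware_req() simply reuses
 * it and only signals req.completion. Otherwise the request is issued from the
 * load_firmware_work above, and rtw89_wait_firmware_completion() blocks on the
 * same completion before the firmware is parsed or downloaded.
 */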

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
		fw->req.firmware = NULL;
	}
}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      H2C_DCTL_SEC_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	u8 macid = rtwsta->mac_id;
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	else
		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 1);
	SET_BA_CAM_SSN(skb->data, params->ssn);

	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
		SET_BA_CAM_STD_EN(skb->data, 1);
		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
					   u8 entry_idx, u8 uid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);

	SET_BA_CAM_VALID(skb->data, 1);
	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	SET_BA_CAM_UID(skb->data, uid);
	SET_BA_CAM_BAND(skb->data, 0);
	SET_BA_CAM_STD_EN(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
					struct rtw89_vif *rtwvif,
					enum rtw89_fw_pkt_ofld_type type,
					u8 *id)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_pktofld_info *info;
	struct sk_buff *skb;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	switch (type) {
	case RTW89_PKT_OFLD_TYPE_PS_POLL:
		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
		break;
	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
		break;
	default:
		goto err;
	}

	if (!skb)
		goto err;

	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
	kfree_skb(skb);

	if (ret)
		goto err;

	list_add_tail(&info->list, &rtwvif->general_pkt_list);
	*id = info->id;
	return 0;

err:
	kfree(info);
	return -ENOMEM;
}

void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
					   struct rtw89_vif *rtwvif, bool notify_fw)
{
	struct list_head *pkt_list = &rtwvif->general_pkt_list;
	struct rtw89_pktofld_info *info, *tmp;

	list_for_each_entry_safe(info, tmp, pkt_list, list) {
		if (notify_fw)
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
		else
			rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
		list_del(&info->list);
		kfree(info);
	}
}

void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
{
	struct rtw89_vif *rtwvif;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
}
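
/* General packet offload: rtw89_fw_h2c_add_general_pkt() asks mac80211 for a
 * template frame (PS-Poll, probe response, null or QoS null), registers it
 * with firmware through rtw89_fw_h2c_add_pkt_offload() and remembers the
 * returned id on the vif's general_pkt_list so it can be released later.
 * rtw89_fw_h2c_general_pkt() below then hands those ids to firmware; ids left
 * at H2C_GENERAL_PKT_ID_UND (0xff) mean no template is offloaded for that
 * frame type.
 */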

#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
			     struct rtw89_vif *rtwvif, u8 macid)
{
	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
	struct sk_buff *skb;
	int ret;

	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 ntx_path;
	u8 map_b;

	if (chip->rf_path_num == 1) {
		ntx_path = RF_A;
		map_b = 0;
	} else {
		ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
		map_b = hal->antenna_tx == RF_AB ? 1 : 0;
	}

	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}

#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
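
/* __get_sta_he_pkt_padding() derives the per-bandwidth nominal packet padding
 * for an HE peer. Without a PPE thresholds field, the nominal packet padding
 * advertised in phy_cap_info[9] is used for every bandwidth. With PPE
 * thresholds, the PPET16/PPET8 pairs are walked per RU allocation index
 * (a value of 7 means "none") and mapped to pads[] = 2, 1 or 0, which the
 * assoc CMAC table below programs into the NOMINAL_PKT_PADDING fields
 * (presumably 16 us / 8 us / no padding, following the HE convention).
 */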

static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_sta *sta, u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	if (!sta->deflink.he_cap.has_he)
		return;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = 2;
		else if (ppe8 != 7)
			pads[i] = 1;
		else
			pads[i] = 0;
	}
}

int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));
	if (sta)
		__get_sta_he_pkt_padding(rtwdev, sta, pads);

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (sta)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  sta->deflink.he_cap.has_he);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (rtwsta->cctl_tx_time) {
		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
	}
	if (rtwsta->cctl_tx_retry_limit) {
		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);

	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
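
/* Beacon update: the current beacon is fetched from mac80211 together with
 * its TIM offset, an H2C_BCN_BASE_LEN descriptor (port, band, macid, group IE
 * offset, HW SSN selection/mode and beacon rate: CCK1 on 2 GHz, OFDM6
 * otherwise or for P2P) is prepended, and descriptor plus frame body are sent
 * as a single H2C.
 */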

#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct sk_buff *skb;
	struct sk_buff *skb_beacon;
	u16 tim_offset;
	int bcn_total_len;
	u16 beacon_rate;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, H2C_BCN_BASE_LEN);

	SET_BCN_UPD_PORT(skb->data, rtwvif->port);
	SET_BCN_UPD_MBSSID(skb->data, 0);
	SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
	SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
	SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
	SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
	SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
	SET_BCN_UPD_RATE(skb->data, beacon_rate);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif,
			       struct rtw89_sta *rtwsta,
			       enum rtw89_upd_mode upd_mode)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role;
	int ret;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
		if (rtwsta)
			self_role = RTW89_SELF_ROLE_AP_CLIENT;
		else
			self_role = rtwvif->self_role;
	} else {
		self_role = rtwvif->self_role;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
			      H2C_ROLE_MAINTAIN_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   struct rtw89_sta *rtwsta, bool dis_conn)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role = rtwvif->self_role;
	u8 net_type = rtwvif->net_type;
	int ret;

	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
		self_role = RTW89_SELF_ROLE_AP_CLIENT;
		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_JOIN_INFO_LEN);
	SET_JOININFO_MACID(skb->data, mac_id);
	SET_JOININFO_OP(skb->data, dis_conn);
	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
	SET_JOININFO_ISHESTA(skb->data, 0);
	SET_JOININFO_DLBW(skb->data, 0);
	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
	SET_JOININFO_DL_T_PE(skb->data, 0);
	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
	SET_JOININFO_NET_TYPE(skb->data, net_type);
	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
	SET_JOININFO_SELF_ROLE(skb->data, self_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      H2C_JOIN_INFO_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_grp h2c = {{0}};
	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
	if (pause)
		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
	skb_put_data(skb, &h2c, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_EDCA_LEN 12
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			  u8 ac, u32 val)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_EDCA_LEN);
	RTW89_SET_EDCA_SEL(skb->data, 0);
	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
	RTW89_SET_EDCA_WMM(skb->data, 0);
	RTW89_SET_EDCA_AC(skb->data, ac);
	RTW89_SET_EDCA_PARAM(skb->data, val);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_USR_EDCA, 0, 1,
			      H2C_EDCA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_TSF32_TOGL_LEN 4
int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			      bool en)
{
	struct sk_buff *skb;
	u16 early_us = en ? 2000 : 0;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_TSF32_TOGL_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TSF32_TOGL, 0, 0,
			      H2C_TSF32_TOGL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_OFLD_CFG_LEN 8
int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
		return -ENOMEM;
	}
	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_CFG, 0, 1,
			      H2C_OFLD_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
				  struct ieee80211_vif *vif,
				  bool connect)
{
	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
	struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
	struct rtw89_h2c_bcnfltr *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
		return -EINVAL;

	if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA)
		return -EINVAL;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcnfltr *)skb->data;

	h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
		  le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
				   RTW89_H2C_BCNFLTR_W0_MODE) |
		  le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
		  le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
		  le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
				   RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
		  le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_CFG_BCNFLTR, 0, 1, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
			      struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_h2c_ofld_rssi *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	s8 rssi;
	int ret;

	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
		return -EINVAL;

	if (!phy_ppdu)
		return -EINVAL;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
		return -ENOMEM;
	}

	rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;

	h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
		  le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
	h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_RSSI, 0, 1, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	struct rtw89_traffic_stats *stats = &rtwvif->stats;
	struct rtw89_h2c_ofld *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	if (rtwvif->net_type != RTW89_NET_TYPE_INFRA)
		return -EINVAL;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ofld *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
		  le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
		  le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_TP, 0, 1, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_RA_LEN 16
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
{
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_RA_LEN);
	cmd = skb->data;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra cmd msk: %llx ", ra->ra_mask);

	RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
	RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
	RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
	RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
	RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
	RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
	RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
	RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
	RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
	RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
	RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
	RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
	RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
	RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
	RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
	RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
	RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);

	if (csi) {
		RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
		RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
		RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
		RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
		RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
		RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
		RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
			      H2C_RA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
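
/* BT-coex driver info: the rtw89_fw_h2c_cxdrv_*() helpers push driver-side
 * coexistence state to firmware as CXDRVINFO_* records, each starting with a
 * type/length header of H2C_LEN_CXDRVHDR bytes and sent via the OUTSRC
 * BTFC_SET / SET_DRV_INFO command. The role info record exists in several
 * layouts whose size is derived from ver->max_role_num.
 */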
if (!skb) { 1989 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 1990 return -ENOMEM; 1991 } 1992 skb_put(skb, len); 1993 h2c = (struct rtw89_h2c_cxinit *)skb->data; 1994 1995 h2c->hdr.type = CXDRVINFO_INIT; 1996 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 1997 1998 h2c->ant_type = ant->type; 1999 h2c->ant_num = ant->num; 2000 h2c->ant_iso = ant->isolation; 2001 h2c->ant_info = 2002 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 2003 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 2004 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 2005 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 2006 2007 h2c->mod_rfe = module->rfe_type; 2008 h2c->mod_cv = module->cv; 2009 h2c->mod_info = 2010 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 2011 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 2012 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 2013 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 2014 h2c->mod_adie_kt = module->kt_ver_adie; 2015 h2c->wl_gch = init_info->wl_guard_ch; 2016 2017 h2c->info = 2018 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 2019 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 2020 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 2021 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 2022 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 2023 2024 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2025 H2C_CAT_OUTSRC, BTFC_SET, 2026 SET_DRV_INFO, 0, 0, 2027 len); 2028 2029 ret = rtw89_h2c_tx(rtwdev, skb, false); 2030 if (ret) { 2031 rtw89_err(rtwdev, "failed to send h2c\n"); 2032 goto fail; 2033 } 2034 2035 return 0; 2036 fail: 2037 dev_kfree_skb_any(skb); 2038 2039 return ret; 2040 } 2041 2042 #define PORT_DATA_OFFSET 4 2043 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 2044 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 2045 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 2046 2047 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 2048 { 2049 struct rtw89_btc *btc = &rtwdev->btc; 2050 const struct rtw89_btc_ver *ver = btc->ver; 2051 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2052 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 2053 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2054 struct rtw89_btc_wl_active_role *active = role_info->active_role; 2055 struct sk_buff *skb; 2056 u32 len; 2057 u8 offset = 0; 2058 u8 *cmd; 2059 int ret; 2060 int i; 2061 2062 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 2063 2064 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2065 if (!skb) { 2066 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2067 return -ENOMEM; 2068 } 2069 skb_put(skb, len); 2070 cmd = skb->data; 2071 2072 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2073 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2074 2075 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2076 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2077 2078 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2079 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2080 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2081 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2082 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2083 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2084 
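	/*
	 * The remaining role-map bits and the per-port active_role records
	 * follow.  In this original (v0) layout the offset passed to the
	 * RTW89_SET_FWCMD_CXROLE_ACT_* setters stays 0, and the length macro
	 * above (H2C_LEN_CXDRVINFO_ROLE_SIZE) budgets 12 bytes per role.
	 */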
RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2085 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2086 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2087 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2088 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2089 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2090 2091 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2092 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2093 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2094 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2095 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2096 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2097 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2098 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2099 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2100 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2101 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2102 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2103 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2104 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2105 } 2106 2107 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2108 H2C_CAT_OUTSRC, BTFC_SET, 2109 SET_DRV_INFO, 0, 0, 2110 len); 2111 2112 ret = rtw89_h2c_tx(rtwdev, skb, false); 2113 if (ret) { 2114 rtw89_err(rtwdev, "failed to send h2c\n"); 2115 goto fail; 2116 } 2117 2118 return 0; 2119 fail: 2120 dev_kfree_skb_any(skb); 2121 2122 return ret; 2123 } 2124 2125 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 2126 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2127 2128 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 2129 { 2130 struct rtw89_btc *btc = &rtwdev->btc; 2131 const struct rtw89_btc_ver *ver = btc->ver; 2132 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2133 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 2134 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2135 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 2136 struct sk_buff *skb; 2137 u32 len; 2138 u8 *cmd, offset; 2139 int ret; 2140 int i; 2141 2142 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 2143 2144 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2145 if (!skb) { 2146 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2147 return -ENOMEM; 2148 } 2149 skb_put(skb, len); 2150 cmd = skb->data; 2151 2152 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2153 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2154 2155 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2156 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2157 2158 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2159 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2160 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2161 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2162 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2163 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2164 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2165 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2166 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2167 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2168 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, 
bpos->p2p_go); 2169 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2170 2171 offset = PORT_DATA_OFFSET; 2172 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2173 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2174 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2175 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2176 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2177 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2178 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2179 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2180 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2181 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2182 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2183 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2184 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2185 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2186 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2187 } 2188 2189 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2190 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2191 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2192 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2193 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2194 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2195 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2196 2197 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2198 H2C_CAT_OUTSRC, BTFC_SET, 2199 SET_DRV_INFO, 0, 0, 2200 len); 2201 2202 ret = rtw89_h2c_tx(rtwdev, skb, false); 2203 if (ret) { 2204 rtw89_err(rtwdev, "failed to send h2c\n"); 2205 goto fail; 2206 } 2207 2208 return 0; 2209 fail: 2210 dev_kfree_skb_any(skb); 2211 2212 return ret; 2213 } 2214 2215 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 2216 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2217 2218 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev) 2219 { 2220 struct rtw89_btc *btc = &rtwdev->btc; 2221 const struct rtw89_btc_ver *ver = btc->ver; 2222 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2223 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 2224 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2225 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 2226 struct sk_buff *skb; 2227 u32 len; 2228 u8 *cmd, offset; 2229 int ret; 2230 int i; 2231 2232 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 2233 2234 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2235 if (!skb) { 2236 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2237 return -ENOMEM; 2238 } 2239 skb_put(skb, len); 2240 cmd = skb->data; 2241 2242 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2243 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2244 2245 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2246 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2247 2248 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2249 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2250 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2251 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2252 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2253 
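	/*
	 * v2 trims each active_role record to 8 bytes (see
	 * H2C_LEN_CXDRVINFO_ROLE_SIZE_V2 above): the per-port tx/rx level and
	 * rate fields carried in v1 are gone, which is reflected in the loop
	 * below using only the *_V2 setters up to ACT_NOA_DUR_V2.
	 */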
RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2254 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2255 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2256 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2257 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2258 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2259 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2260 2261 offset = PORT_DATA_OFFSET; 2262 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2263 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 2264 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 2265 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 2266 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 2267 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 2268 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 2269 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 2270 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 2271 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 2272 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 2273 } 2274 2275 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2276 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2277 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2278 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2279 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2280 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2281 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2282 2283 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2284 H2C_CAT_OUTSRC, BTFC_SET, 2285 SET_DRV_INFO, 0, 0, 2286 len); 2287 2288 ret = rtw89_h2c_tx(rtwdev, skb, false); 2289 if (ret) { 2290 rtw89_err(rtwdev, "failed to send h2c\n"); 2291 goto fail; 2292 } 2293 2294 return 0; 2295 fail: 2296 dev_kfree_skb_any(skb); 2297 2298 return ret; 2299 } 2300 2301 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2302 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2303 { 2304 struct rtw89_btc *btc = &rtwdev->btc; 2305 const struct rtw89_btc_ver *ver = btc->ver; 2306 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2307 struct sk_buff *skb; 2308 u8 *cmd; 2309 int ret; 2310 2311 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 2312 if (!skb) { 2313 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2314 return -ENOMEM; 2315 } 2316 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 2317 cmd = skb->data; 2318 2319 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 2320 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 2321 2322 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 2323 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 2324 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 2325 if (ver->fcxctrl == 0) 2326 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 2327 2328 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2329 H2C_CAT_OUTSRC, BTFC_SET, 2330 SET_DRV_INFO, 0, 0, 2331 H2C_LEN_CXDRVINFO_CTRL); 2332 2333 ret = rtw89_h2c_tx(rtwdev, skb, false); 2334 if (ret) { 2335 rtw89_err(rtwdev, "failed to send h2c\n"); 2336 goto fail; 2337 } 2338 2339 return 0; 2340 fail: 2341 dev_kfree_skb_any(skb); 2342 2343 return ret; 2344 } 2345 2346 #define H2C_LEN_CXDRVINFO_TRX (28 + 
H2C_LEN_CXDRVHDR) 2347 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev) 2348 { 2349 struct rtw89_btc *btc = &rtwdev->btc; 2350 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 2351 struct sk_buff *skb; 2352 u8 *cmd; 2353 int ret; 2354 2355 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 2356 if (!skb) { 2357 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 2358 return -ENOMEM; 2359 } 2360 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 2361 cmd = skb->data; 2362 2363 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX); 2364 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 2365 2366 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 2367 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 2368 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 2369 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 2370 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 2371 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 2372 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 2373 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 2374 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 2375 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 2376 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 2377 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 2378 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 2379 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 2380 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 2381 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 2382 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 2383 2384 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2385 H2C_CAT_OUTSRC, BTFC_SET, 2386 SET_DRV_INFO, 0, 0, 2387 H2C_LEN_CXDRVINFO_TRX); 2388 2389 ret = rtw89_h2c_tx(rtwdev, skb, false); 2390 if (ret) { 2391 rtw89_err(rtwdev, "failed to send h2c\n"); 2392 goto fail; 2393 } 2394 2395 return 0; 2396 fail: 2397 dev_kfree_skb_any(skb); 2398 2399 return ret; 2400 } 2401 2402 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 2403 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) 2404 { 2405 struct rtw89_btc *btc = &rtwdev->btc; 2406 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2407 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 2408 struct sk_buff *skb; 2409 u8 *cmd; 2410 int ret; 2411 2412 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 2413 if (!skb) { 2414 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2415 return -ENOMEM; 2416 } 2417 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 2418 cmd = skb->data; 2419 2420 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); 2421 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 2422 2423 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 2424 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 2425 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 2426 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 2427 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 2428 2429 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2430 H2C_CAT_OUTSRC, BTFC_SET, 2431 SET_DRV_INFO, 0, 0, 2432 H2C_LEN_CXDRVINFO_RFK); 2433 2434 ret = rtw89_h2c_tx(rtwdev, skb, false); 2435 if (ret) { 2436 rtw89_err(rtwdev, "failed to send h2c\n"); 2437 goto fail; 2438 } 2439 2440 return 0; 2441 fail: 2442 dev_kfree_skb_any(skb); 2443 2444 return ret; 2445 } 2446 2447 #define H2C_LEN_PKT_OFLD 4 2448 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 2449 { 2450 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2451 struct sk_buff *skb; 2452 unsigned int cond; 2453 u8 *cmd; 2454 
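	/*
	 * Packet-offload delete is a completion-waited command: the cond
	 * value built below packs the packet index together with the DEL
	 * opcode (RTW89_FW_OFLD_WAIT_COND_PKT_OFLD), so the completion path
	 * can wake exactly this request before the id is returned to the
	 * pkt_offload bitmap.
	 */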
int ret; 2455 2456 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 2457 if (!skb) { 2458 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2459 return -ENOMEM; 2460 } 2461 skb_put(skb, H2C_LEN_PKT_OFLD); 2462 cmd = skb->data; 2463 2464 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 2465 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 2466 2467 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2468 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2469 H2C_FUNC_PACKET_OFLD, 1, 1, 2470 H2C_LEN_PKT_OFLD); 2471 2472 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 2473 2474 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2475 if (ret < 0) { 2476 rtw89_debug(rtwdev, RTW89_DBG_FW, 2477 "failed to del pkt ofld: id %d, ret %d\n", 2478 id, ret); 2479 return ret; 2480 } 2481 2482 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 2483 return 0; 2484 } 2485 2486 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 2487 struct sk_buff *skb_ofld) 2488 { 2489 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2490 struct sk_buff *skb; 2491 unsigned int cond; 2492 u8 *cmd; 2493 u8 alloc_id; 2494 int ret; 2495 2496 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2497 RTW89_MAX_PKT_OFLD_NUM); 2498 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2499 return -ENOSPC; 2500 2501 *id = alloc_id; 2502 2503 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2504 if (!skb) { 2505 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2506 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2507 return -ENOMEM; 2508 } 2509 skb_put(skb, H2C_LEN_PKT_OFLD); 2510 cmd = skb->data; 2511 2512 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2513 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2514 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 2515 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 2516 2517 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2518 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2519 H2C_FUNC_PACKET_OFLD, 1, 1, 2520 H2C_LEN_PKT_OFLD + skb_ofld->len); 2521 2522 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 2523 2524 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2525 if (ret < 0) { 2526 rtw89_debug(rtwdev, RTW89_DBG_FW, 2527 "failed to add pkt ofld: id %d, ret %d\n", 2528 alloc_id, ret); 2529 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2530 return ret; 2531 } 2532 2533 return 0; 2534 } 2535 2536 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 2537 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 2538 struct list_head *chan_list) 2539 { 2540 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2541 struct rtw89_mac_chinfo *ch_info; 2542 struct sk_buff *skb; 2543 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 2544 unsigned int cond; 2545 u8 *cmd; 2546 int ret; 2547 2548 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 2549 if (!skb) { 2550 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 2551 return -ENOMEM; 2552 } 2553 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 2554 cmd = skb->data; 2555 2556 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 2557 /* in unit of 4 bytes */ 2558 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 2559 2560 list_for_each_entry(ch_info, chan_list, list) { 2561 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 2562 2563 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 2564 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, 
ch_info->dwell_time); 2565 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 2566 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 2567 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 2568 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 2569 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 2570 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 2571 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 2572 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 2573 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 2574 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 2575 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 2576 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 2577 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 2578 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 2579 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 2580 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 2581 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 2582 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 2583 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 2584 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 2585 } 2586 2587 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2588 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2589 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 2590 2591 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH); 2592 2593 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2594 if (ret) { 2595 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 2596 return ret; 2597 } 2598 2599 return 0; 2600 } 2601 2602 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 2603 struct rtw89_scan_option *option, 2604 struct rtw89_vif *rtwvif) 2605 { 2606 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2607 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 2608 struct rtw89_h2c_scanofld *h2c; 2609 u32 len = sizeof(*h2c); 2610 struct sk_buff *skb; 2611 unsigned int cond; 2612 int ret; 2613 2614 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2615 if (!skb) { 2616 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 2617 return -ENOMEM; 2618 } 2619 skb_put(skb, len); 2620 h2c = (struct rtw89_h2c_scanofld *)skb->data; 2621 2622 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 2623 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 2624 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 2625 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 2626 2627 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 2628 le32_encode_bits(option->target_ch_mode, 2629 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 2630 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 2631 RTW89_H2C_SCANOFLD_W1_START_MODE) | 2632 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 2633 2634 if (option->target_ch_mode) { 2635 h2c->w1 |= le32_encode_bits(op->band_width, 2636 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 2637 le32_encode_bits(op->primary_channel, 2638 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 2639 le32_encode_bits(op->channel, 2640 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 2641 h2c->w0 |= le32_encode_bits(op->band_type, 2642 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 2643 } 2644 2645 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2646 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2647 H2C_FUNC_SCANOFLD, 1, 1, 2648 len); 2649 2650 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD); 2651 2652 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, 
wait, cond); 2653 if (ret) { 2654 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 2655 return ret; 2656 } 2657 2658 return 0; 2659 } 2660 2661 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 2662 struct rtw89_fw_h2c_rf_reg_info *info, 2663 u16 len, u8 page) 2664 { 2665 struct sk_buff *skb; 2666 u8 class = info->rf_path == RF_PATH_A ? 2667 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 2668 int ret; 2669 2670 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2671 if (!skb) { 2672 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 2673 return -ENOMEM; 2674 } 2675 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 2676 2677 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2678 H2C_CAT_OUTSRC, class, page, 0, 0, 2679 len); 2680 2681 ret = rtw89_h2c_tx(rtwdev, skb, false); 2682 if (ret) { 2683 rtw89_err(rtwdev, "failed to send h2c\n"); 2684 goto fail; 2685 } 2686 2687 return 0; 2688 fail: 2689 dev_kfree_skb_any(skb); 2690 2691 return ret; 2692 } 2693 2694 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 2695 { 2696 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2697 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 2698 struct rtw89_fw_h2c_rf_get_mccch *mccch; 2699 struct sk_buff *skb; 2700 int ret; 2701 2702 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 2703 if (!skb) { 2704 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2705 return -ENOMEM; 2706 } 2707 skb_put(skb, sizeof(*mccch)); 2708 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 2709 2710 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 2711 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 2712 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 2713 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 2714 mccch->current_channel = cpu_to_le32(chan->channel); 2715 mccch->current_band_type = cpu_to_le32(chan->band_type); 2716 2717 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2718 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 2719 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 2720 sizeof(*mccch)); 2721 2722 ret = rtw89_h2c_tx(rtwdev, skb, false); 2723 if (ret) { 2724 rtw89_err(rtwdev, "failed to send h2c\n"); 2725 goto fail; 2726 } 2727 2728 return 0; 2729 fail: 2730 dev_kfree_skb_any(skb); 2731 2732 return ret; 2733 } 2734 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 2735 2736 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 2737 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 2738 bool rack, bool dack) 2739 { 2740 struct sk_buff *skb; 2741 int ret; 2742 2743 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2744 if (!skb) { 2745 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 2746 return -ENOMEM; 2747 } 2748 skb_put_data(skb, buf, len); 2749 2750 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2751 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 2752 len); 2753 2754 ret = rtw89_h2c_tx(rtwdev, skb, false); 2755 if (ret) { 2756 rtw89_err(rtwdev, "failed to send h2c\n"); 2757 goto fail; 2758 } 2759 2760 return 0; 2761 fail: 2762 dev_kfree_skb_any(skb); 2763 2764 return ret; 2765 } 2766 2767 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 2768 { 2769 struct sk_buff *skb; 2770 int ret; 2771 2772 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 2773 if (!skb) { 2774 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 2775 return -ENOMEM; 2776 } 2777 skb_put_data(skb, buf, len); 2778 2779 ret = rtw89_h2c_tx(rtwdev, skb, false); 2780 if (ret) { 2781 rtw89_err(rtwdev, "failed to send h2c\n"); 2782 goto fail; 2783 
} 2784 2785 return 0; 2786 fail: 2787 dev_kfree_skb_any(skb); 2788 2789 return ret; 2790 } 2791 2792 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 2793 { 2794 struct rtw89_early_h2c *early_h2c; 2795 2796 lockdep_assert_held(&rtwdev->mutex); 2797 2798 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 2799 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 2800 } 2801 } 2802 2803 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 2804 { 2805 struct rtw89_early_h2c *early_h2c, *tmp; 2806 2807 mutex_lock(&rtwdev->mutex); 2808 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 2809 list_del(&early_h2c->list); 2810 kfree(early_h2c->h2c); 2811 kfree(early_h2c); 2812 } 2813 mutex_unlock(&rtwdev->mutex); 2814 } 2815 2816 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 2817 { 2818 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2819 2820 attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); 2821 attr->class = RTW89_GET_C2H_CLASS(c2h->data); 2822 attr->func = RTW89_GET_C2H_FUNC(c2h->data); 2823 attr->len = RTW89_GET_C2H_LEN(c2h->data); 2824 } 2825 2826 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 2827 struct sk_buff *c2h) 2828 { 2829 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2830 u8 category = attr->category; 2831 u8 class = attr->class; 2832 u8 func = attr->func; 2833 2834 switch (category) { 2835 default: 2836 return false; 2837 case RTW89_C2H_CAT_MAC: 2838 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 2839 } 2840 } 2841 2842 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 2843 { 2844 rtw89_fw_c2h_parse_attr(c2h); 2845 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 2846 goto enqueue; 2847 2848 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 2849 dev_kfree_skb_any(c2h); 2850 return; 2851 2852 enqueue: 2853 skb_queue_tail(&rtwdev->c2h_queue, c2h); 2854 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 2855 } 2856 2857 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 2858 struct sk_buff *skb) 2859 { 2860 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 2861 u8 category = attr->category; 2862 u8 class = attr->class; 2863 u8 func = attr->func; 2864 u16 len = attr->len; 2865 bool dump = true; 2866 2867 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2868 return; 2869 2870 switch (category) { 2871 case RTW89_C2H_CAT_TEST: 2872 break; 2873 case RTW89_C2H_CAT_MAC: 2874 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 2875 if (class == RTW89_MAC_C2H_CLASS_INFO && 2876 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 2877 dump = false; 2878 break; 2879 case RTW89_C2H_CAT_OUTSRC: 2880 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 2881 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 2882 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 2883 else 2884 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 2885 break; 2886 } 2887 2888 if (dump) 2889 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 2890 } 2891 2892 void rtw89_fw_c2h_work(struct work_struct *work) 2893 { 2894 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2895 c2h_work); 2896 struct sk_buff *skb, *tmp; 2897 2898 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 2899 skb_unlink(skb, &rtwdev->c2h_queue); 2900 mutex_lock(&rtwdev->mutex); 2901 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 2902 mutex_unlock(&rtwdev->mutex); 2903 dev_kfree_skb_any(skb); 2904 } 2905 } 2906 2907 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 2908 struct rtw89_mac_h2c_info *info) 2909 { 2910 
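	/*
	 * Register-based H2C path: small messages are written directly to
	 * the chip's h2c_regs words instead of being queued as H2C packets.
	 * The control register is polled until the firmware has consumed the
	 * previous message, the header word is patched with the function id
	 * and length, and B_AX_H2CREG_TRIGGER kicks the firmware.  Callers
	 * such as rtw89_fw_msg_reg() pair this with rtw89_fw_read_c2h_reg()
	 * to collect the reply.
	 */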
const struct rtw89_chip_info *chip = rtwdev->chip; 2911 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2912 const u32 *h2c_reg = chip->h2c_regs; 2913 u8 i, val, len; 2914 int ret; 2915 2916 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 2917 rtwdev, chip->h2c_ctrl_reg); 2918 if (ret) { 2919 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 2920 return ret; 2921 } 2922 2923 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 2924 sizeof(info->u.h2creg[0])); 2925 2926 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 2927 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 2928 2929 for (i = 0; i < RTW89_H2CREG_MAX; i++) 2930 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 2931 2932 fw_info->h2c_counter++; 2933 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 2934 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 2935 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 2936 2937 return 0; 2938 } 2939 2940 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 2941 struct rtw89_mac_c2h_info *info) 2942 { 2943 const struct rtw89_chip_info *chip = rtwdev->chip; 2944 struct rtw89_fw_info *fw_info = &rtwdev->fw; 2945 const u32 *c2h_reg = chip->c2h_regs; 2946 u32 ret; 2947 u8 i, val; 2948 2949 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 2950 2951 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 2952 RTW89_C2H_TIMEOUT, false, rtwdev, 2953 chip->c2h_ctrl_reg); 2954 if (ret) { 2955 rtw89_warn(rtwdev, "c2h reg timeout\n"); 2956 return ret; 2957 } 2958 2959 for (i = 0; i < RTW89_C2HREG_MAX; i++) 2960 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 2961 2962 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 2963 2964 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 2965 info->content_len = 2966 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 2967 RTW89_C2HREG_HDR_LEN; 2968 2969 fw_info->c2h_counter++; 2970 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 2971 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 2972 2973 return 0; 2974 } 2975 2976 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 2977 struct rtw89_mac_h2c_info *h2c_info, 2978 struct rtw89_mac_c2h_info *c2h_info) 2979 { 2980 u32 ret; 2981 2982 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 2983 lockdep_assert_held(&rtwdev->mutex); 2984 2985 if (!h2c_info && !c2h_info) 2986 return -EINVAL; 2987 2988 if (!h2c_info) 2989 goto recv_c2h; 2990 2991 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 2992 if (ret) 2993 return ret; 2994 2995 recv_c2h: 2996 if (!c2h_info) 2997 return 0; 2998 2999 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 3000 if (ret) 3001 return ret; 3002 3003 return 0; 3004 } 3005 3006 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 3007 { 3008 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 3009 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 3010 return; 3011 } 3012 3013 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 3014 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 3015 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 3016 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 3017 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 3018 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 3019 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 3020 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 3021 3022 rtw89_fw_prog_cnt_dump(rtwdev); 3023 } 3024 3025 static void 
rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 3026 { 3027 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3028 struct rtw89_pktofld_info *info, *tmp; 3029 u8 idx; 3030 3031 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 3032 if (!(rtwdev->chip->support_bands & BIT(idx))) 3033 continue; 3034 3035 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 3036 if (test_bit(info->id, rtwdev->pkt_offload)) 3037 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 3038 list_del(&info->list); 3039 kfree(info); 3040 } 3041 } 3042 } 3043 3044 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 3045 struct rtw89_vif *rtwvif, 3046 struct rtw89_pktofld_info *info, 3047 enum nl80211_band band, u8 ssid_idx) 3048 { 3049 struct cfg80211_scan_request *req = rtwvif->scan_req; 3050 3051 if (band != NL80211_BAND_6GHZ) 3052 return false; 3053 3054 if (req->ssids[ssid_idx].ssid_len) { 3055 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 3056 req->ssids[ssid_idx].ssid_len); 3057 info->ssid_len = req->ssids[ssid_idx].ssid_len; 3058 return false; 3059 } else { 3060 return true; 3061 } 3062 } 3063 3064 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 3065 struct rtw89_vif *rtwvif, 3066 struct sk_buff *skb, u8 ssid_idx) 3067 { 3068 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3069 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3070 struct rtw89_pktofld_info *info; 3071 struct sk_buff *new; 3072 int ret = 0; 3073 u8 band; 3074 3075 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 3076 if (!(rtwdev->chip->support_bands & BIT(band))) 3077 continue; 3078 3079 new = skb_copy(skb, GFP_KERNEL); 3080 if (!new) { 3081 ret = -ENOMEM; 3082 goto out; 3083 } 3084 skb_put_data(new, ies->ies[band], ies->len[band]); 3085 skb_put_data(new, ies->common_ies, ies->common_ie_len); 3086 3087 info = kzalloc(sizeof(*info), GFP_KERNEL); 3088 if (!info) { 3089 ret = -ENOMEM; 3090 kfree_skb(new); 3091 goto out; 3092 } 3093 3094 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 3095 ssid_idx)) { 3096 kfree_skb(new); 3097 kfree(info); 3098 goto out; 3099 } 3100 3101 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 3102 if (ret) { 3103 kfree_skb(new); 3104 kfree(info); 3105 goto out; 3106 } 3107 3108 list_add_tail(&info->list, &scan_info->pkt_list[band]); 3109 kfree_skb(new); 3110 } 3111 out: 3112 return ret; 3113 } 3114 3115 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 3116 struct rtw89_vif *rtwvif) 3117 { 3118 struct cfg80211_scan_request *req = rtwvif->scan_req; 3119 struct sk_buff *skb; 3120 u8 num = req->n_ssids, i; 3121 int ret; 3122 3123 for (i = 0; i < num; i++) { 3124 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3125 req->ssids[i].ssid, 3126 req->ssids[i].ssid_len, 3127 req->ie_len); 3128 if (!skb) 3129 return -ENOMEM; 3130 3131 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 3132 kfree_skb(skb); 3133 3134 if (ret) 3135 return ret; 3136 } 3137 3138 return 0; 3139 } 3140 3141 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 3142 struct cfg80211_scan_request *req, 3143 struct rtw89_mac_chinfo *ch_info) 3144 { 3145 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3146 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3147 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3148 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3149 struct cfg80211_scan_6ghz_params *params; 3150 struct rtw89_pktofld_info *info, *tmp; 3151 struct ieee80211_hdr 
*hdr;
	struct sk_buff *skb;
	bool found;
	int ret = 0;
	u8 i;

	if (!req->n_6ghz_params)
		return 0;

	for (i = 0; i < req->n_6ghz_params; i++) {
		params = &req->scan_6ghz_params[i];

		if (req->channels[params->channel_idx]->hw_value !=
		    ch_info->pri_ch)
			continue;

		found = false;
		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
			if (ether_addr_equal(tmp->bssid, params->bssid)) {
				found = true;
				break;
			}
		}
		if (found)
			continue;

		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     NULL, 0, req->ie_len);
		/* ieee80211_probereq_get() returns NULL on allocation failure */
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}

		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
		hdr = (struct ieee80211_hdr *)skb->data;
		ether_addr_copy(hdr->addr3, params->bssid);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(skb);
			goto out;
		}

		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
		if (ret) {
			kfree_skb(skb);
			kfree(info);
			goto out;
		}

		ether_addr_copy(info->bssid, params->bssid);
		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);

		ch_info->tx_pkt = true;
		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;

		kfree_skb(skb);
	}

out:
	return ret;
}

static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
				   int ssid_num,
				   struct rtw89_mac_chinfo *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
	struct rtw89_pktofld_info *info;
	u8 band, probe_count = 0;
	int ret;

	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ch_info->ch_band == RTW89_BAND_6G) {
		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
		    !ch_info->is_psc) {
			ch_info->tx_pkt = false;
			if (!req->duration_mandatory)
				ch_info->period -= RTW89_DWELL_TIME_6G;
		}
	}

	ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
	if (ret)
		rtw89_warn(rtwdev, "RNR update failed: %d\n", ret);

	if (ssid_num) {
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;
			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
		ch_info->num_pkt = probe_count;
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		ch_info->central_ch = op->channel;
		ch_info->pri_ch = op->primary_channel;
		ch_info->ch_band = op->band_type;
		ch_info->bw = op->band_width;
		ch_info->tx_null = true;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
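		/*
		 * DFS / no-IR channels (flagged IEEE80211_CHAN_RADAR or
		 * IEEE80211_CHAN_NO_IR in rtw89_hw_scan_add_chan_list() below)
		 * get a longer minimum listen period outside 6 GHz and an
		 * explicit dwell time.
		 */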
3272 if (ch_info->ch_band != RTW89_BAND_6G) 3273 ch_info->period = max_t(u8, ch_info->period, 3274 RTW89_DFS_CHAN_TIME); 3275 ch_info->dwell_time = RTW89_DWELL_TIME; 3276 break; 3277 case RTW89_CHAN_ACTIVE: 3278 break; 3279 default: 3280 rtw89_err(rtwdev, "Channel type out of bound\n"); 3281 } 3282 } 3283 3284 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 3285 struct rtw89_vif *rtwvif, bool connected) 3286 { 3287 struct cfg80211_scan_request *req = rtwvif->scan_req; 3288 struct rtw89_mac_chinfo *ch_info, *tmp; 3289 struct ieee80211_channel *channel; 3290 struct list_head chan_list; 3291 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 3292 int list_len, off_chan_time = 0; 3293 enum rtw89_chan_type type; 3294 int ret = 0; 3295 u32 idx; 3296 3297 INIT_LIST_HEAD(&chan_list); 3298 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 3299 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 3300 idx++, list_len++) { 3301 channel = req->channels[idx]; 3302 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 3303 if (!ch_info) { 3304 ret = -ENOMEM; 3305 goto out; 3306 } 3307 3308 if (req->duration_mandatory) 3309 ch_info->period = req->duration; 3310 else if (channel->band == NL80211_BAND_6GHZ) 3311 ch_info->period = RTW89_CHANNEL_TIME_6G + 3312 RTW89_DWELL_TIME_6G; 3313 else 3314 ch_info->period = RTW89_CHANNEL_TIME; 3315 3316 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 3317 ch_info->central_ch = channel->hw_value; 3318 ch_info->pri_ch = channel->hw_value; 3319 ch_info->rand_seq_num = random_seq; 3320 ch_info->is_psc = cfg80211_channel_is_psc(channel); 3321 3322 if (channel->flags & 3323 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 3324 type = RTW89_CHAN_DFS; 3325 else 3326 type = RTW89_CHAN_ACTIVE; 3327 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 3328 3329 if (connected && 3330 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 3331 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3332 if (!tmp) { 3333 ret = -ENOMEM; 3334 kfree(ch_info); 3335 goto out; 3336 } 3337 3338 type = RTW89_CHAN_OPERATE; 3339 tmp->period = req->duration_mandatory ? 
3340 req->duration : RTW89_CHANNEL_TIME; 3341 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 3342 list_add_tail(&tmp->list, &chan_list); 3343 off_chan_time = 0; 3344 list_len++; 3345 } 3346 list_add_tail(&ch_info->list, &chan_list); 3347 off_chan_time += ch_info->period; 3348 } 3349 rtwdev->scan_info.last_chan_idx = idx; 3350 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 3351 3352 out: 3353 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 3354 list_del(&ch_info->list); 3355 kfree(ch_info); 3356 } 3357 3358 return ret; 3359 } 3360 3361 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 3362 struct rtw89_vif *rtwvif, bool connected) 3363 { 3364 int ret; 3365 3366 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 3367 if (ret) { 3368 rtw89_err(rtwdev, "Update probe request failed\n"); 3369 goto out; 3370 } 3371 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected); 3372 out: 3373 return ret; 3374 } 3375 3376 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3377 struct ieee80211_scan_request *scan_req) 3378 { 3379 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3380 struct cfg80211_scan_request *req = &scan_req->req; 3381 u32 rx_fltr = rtwdev->hal.rx_fltr; 3382 u8 mac_addr[ETH_ALEN]; 3383 3384 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 3385 rtwdev->scan_info.scanning_vif = vif; 3386 rtwdev->scan_info.last_chan_idx = 0; 3387 rtwvif->scan_ies = &scan_req->ies; 3388 rtwvif->scan_req = req; 3389 ieee80211_stop_queues(rtwdev->hw); 3390 3391 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 3392 get_random_mask_addr(mac_addr, req->mac_addr, 3393 req->mac_addr_mask); 3394 else 3395 ether_addr_copy(mac_addr, vif->addr); 3396 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 3397 3398 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 3399 rx_fltr &= ~B_AX_A_BC; 3400 rx_fltr &= ~B_AX_A_A1_MATCH; 3401 rtw89_write32_mask(rtwdev, 3402 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3403 B_AX_RX_FLTR_CFG_MASK, 3404 rx_fltr); 3405 } 3406 3407 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3408 bool aborted) 3409 { 3410 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3411 struct cfg80211_scan_info info = { 3412 .aborted = aborted, 3413 }; 3414 struct rtw89_vif *rtwvif; 3415 3416 if (!vif) 3417 return; 3418 3419 rtw89_write32_mask(rtwdev, 3420 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 3421 B_AX_RX_FLTR_CFG_MASK, 3422 rtwdev->hal.rx_fltr); 3423 3424 rtw89_core_scan_complete(rtwdev, vif, true); 3425 ieee80211_scan_completed(rtwdev->hw, &info); 3426 ieee80211_wake_queues(rtwdev->hw); 3427 3428 rtw89_release_pkt_list(rtwdev); 3429 rtwvif = (struct rtw89_vif *)vif->drv_priv; 3430 rtwvif->scan_req = NULL; 3431 rtwvif->scan_ies = NULL; 3432 scan_info->last_chan_idx = 0; 3433 scan_info->scanning_vif = NULL; 3434 3435 rtw89_set_channel(rtwdev); 3436 } 3437 3438 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 3439 { 3440 rtw89_hw_scan_offload(rtwdev, vif, false); 3441 rtw89_hw_scan_complete(rtwdev, vif, true); 3442 } 3443 3444 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3445 bool enable) 3446 { 3447 struct rtw89_scan_option opt = {0}; 3448 struct rtw89_vif *rtwvif; 3449 bool connected; 3450 int ret = 0; 3451 3452 rtwvif = vif ? 
(struct rtw89_vif *)vif->drv_priv : NULL; 3453 if (!rtwvif) 3454 return -EINVAL; 3455 3456 /* This variable implies connected or during attempt to connect */ 3457 connected = !is_zero_ether_addr(rtwvif->bssid); 3458 opt.enable = enable; 3459 opt.target_ch_mode = connected; 3460 if (enable) { 3461 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 3462 if (ret) 3463 goto out; 3464 } 3465 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 3466 out: 3467 return ret; 3468 } 3469 3470 #define H2C_FW_CPU_EXCEPTION_LEN 4 3471 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 3472 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 3473 { 3474 struct sk_buff *skb; 3475 int ret; 3476 3477 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 3478 if (!skb) { 3479 rtw89_err(rtwdev, 3480 "failed to alloc skb for fw cpu exception\n"); 3481 return -ENOMEM; 3482 } 3483 3484 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 3485 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 3486 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 3487 3488 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3489 H2C_CAT_TEST, 3490 H2C_CL_FW_STATUS_TEST, 3491 H2C_FUNC_CPU_EXCEPTION, 0, 0, 3492 H2C_FW_CPU_EXCEPTION_LEN); 3493 3494 ret = rtw89_h2c_tx(rtwdev, skb, false); 3495 if (ret) { 3496 rtw89_err(rtwdev, "failed to send h2c\n"); 3497 goto fail; 3498 } 3499 3500 return 0; 3501 3502 fail: 3503 dev_kfree_skb_any(skb); 3504 return ret; 3505 } 3506 3507 #define H2C_PKT_DROP_LEN 24 3508 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 3509 const struct rtw89_pkt_drop_params *params) 3510 { 3511 struct sk_buff *skb; 3512 int ret; 3513 3514 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 3515 if (!skb) { 3516 rtw89_err(rtwdev, 3517 "failed to alloc skb for packet drop\n"); 3518 return -ENOMEM; 3519 } 3520 3521 switch (params->sel) { 3522 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 3523 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 3524 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 3525 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 3526 case RTW89_PKT_DROP_SEL_BAND_ONCE: 3527 break; 3528 default: 3529 rtw89_debug(rtwdev, RTW89_DBG_FW, 3530 "H2C of pkt drop might not fully support sel: %d yet\n", 3531 params->sel); 3532 break; 3533 } 3534 3535 skb_put(skb, H2C_PKT_DROP_LEN); 3536 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 3537 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 3538 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 3539 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 3540 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 3541 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 3542 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 3543 params->macid_band_sel[0]); 3544 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 3545 params->macid_band_sel[1]); 3546 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 3547 params->macid_band_sel[2]); 3548 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 3549 params->macid_band_sel[3]); 3550 3551 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3552 H2C_CAT_MAC, 3553 H2C_CL_MAC_FW_OFLD, 3554 H2C_FUNC_PKT_DROP, 0, 0, 3555 H2C_PKT_DROP_LEN); 3556 3557 ret = rtw89_h2c_tx(rtwdev, skb, false); 3558 if (ret) { 3559 rtw89_err(rtwdev, "failed to send h2c\n"); 3560 goto fail; 3561 } 3562 3563 return 0; 3564 3565 fail: 3566 dev_kfree_skb_any(skb); 3567 return ret; 3568 } 3569 3570 #define H2C_KEEP_ALIVE_LEN 4 3571 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3572 bool 
enable) 3573 { 3574 struct sk_buff *skb; 3575 u8 pkt_id = 0; 3576 int ret; 3577 3578 if (enable) { 3579 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 3580 RTW89_PKT_OFLD_TYPE_NULL_DATA, 3581 &pkt_id); 3582 if (ret) 3583 return -EPERM; 3584 } 3585 3586 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 3587 if (!skb) { 3588 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3589 return -ENOMEM; 3590 } 3591 3592 skb_put(skb, H2C_KEEP_ALIVE_LEN); 3593 3594 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 3595 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 3596 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 3597 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 3598 3599 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3600 H2C_CAT_MAC, 3601 H2C_CL_MAC_WOW, 3602 H2C_FUNC_KEEP_ALIVE, 0, 1, 3603 H2C_KEEP_ALIVE_LEN); 3604 3605 ret = rtw89_h2c_tx(rtwdev, skb, false); 3606 if (ret) { 3607 rtw89_err(rtwdev, "failed to send h2c\n"); 3608 goto fail; 3609 } 3610 3611 return 0; 3612 3613 fail: 3614 dev_kfree_skb_any(skb); 3615 3616 return ret; 3617 } 3618 3619 #define H2C_DISCONNECT_DETECT_LEN 8 3620 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 3621 struct rtw89_vif *rtwvif, bool enable) 3622 { 3623 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3624 struct sk_buff *skb; 3625 u8 macid = rtwvif->mac_id; 3626 int ret; 3627 3628 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 3629 if (!skb) { 3630 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3631 return -ENOMEM; 3632 } 3633 3634 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 3635 3636 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 3637 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 3638 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 3639 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 3640 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 3641 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 3642 } 3643 3644 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3645 H2C_CAT_MAC, 3646 H2C_CL_MAC_WOW, 3647 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 3648 H2C_DISCONNECT_DETECT_LEN); 3649 3650 ret = rtw89_h2c_tx(rtwdev, skb, false); 3651 if (ret) { 3652 rtw89_err(rtwdev, "failed to send h2c\n"); 3653 goto fail; 3654 } 3655 3656 return 0; 3657 3658 fail: 3659 dev_kfree_skb_any(skb); 3660 3661 return ret; 3662 } 3663 3664 #define H2C_WOW_GLOBAL_LEN 8 3665 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3666 bool enable) 3667 { 3668 struct sk_buff *skb; 3669 u8 macid = rtwvif->mac_id; 3670 int ret; 3671 3672 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); 3673 if (!skb) { 3674 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3675 return -ENOMEM; 3676 } 3677 3678 skb_put(skb, H2C_WOW_GLOBAL_LEN); 3679 3680 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); 3681 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); 3682 3683 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3684 H2C_CAT_MAC, 3685 H2C_CL_MAC_WOW, 3686 H2C_FUNC_WOW_GLOBAL, 0, 1, 3687 H2C_WOW_GLOBAL_LEN); 3688 3689 ret = rtw89_h2c_tx(rtwdev, skb, false); 3690 if (ret) { 3691 rtw89_err(rtwdev, "failed to send h2c\n"); 3692 goto fail; 3693 } 3694 3695 return 0; 3696 3697 fail: 3698 dev_kfree_skb_any(skb); 3699 3700 return ret; 3701 } 3702 3703 #define H2C_WAKEUP_CTRL_LEN 4 3704 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 3705 struct rtw89_vif *rtwvif, 3706 bool enable) 3707 { 3708 struct rtw89_wow_param 
*rtw_wow = &rtwdev->wow; 3709 struct sk_buff *skb; 3710 u8 macid = rtwvif->mac_id; 3711 int ret; 3712 3713 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 3714 if (!skb) { 3715 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3716 return -ENOMEM; 3717 } 3718 3719 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 3720 3721 if (rtw_wow->pattern_cnt) 3722 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 3723 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 3724 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 3725 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 3726 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 3727 3728 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 3729 3730 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3731 H2C_CAT_MAC, 3732 H2C_CL_MAC_WOW, 3733 H2C_FUNC_WAKEUP_CTRL, 0, 1, 3734 H2C_WAKEUP_CTRL_LEN); 3735 3736 ret = rtw89_h2c_tx(rtwdev, skb, false); 3737 if (ret) { 3738 rtw89_err(rtwdev, "failed to send h2c\n"); 3739 goto fail; 3740 } 3741 3742 return 0; 3743 3744 fail: 3745 dev_kfree_skb_any(skb); 3746 3747 return ret; 3748 } 3749 3750 #define H2C_WOW_CAM_UPD_LEN 24 3751 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 3752 struct rtw89_wow_cam_info *cam_info) 3753 { 3754 struct sk_buff *skb; 3755 int ret; 3756 3757 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 3758 if (!skb) { 3759 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3760 return -ENOMEM; 3761 } 3762 3763 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 3764 3765 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 3766 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 3767 if (cam_info->valid) { 3768 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 3769 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 3770 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 3771 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 3772 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 3773 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 3774 cam_info->negative_pattern_match); 3775 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 3776 cam_info->skip_mac_hdr); 3777 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 3778 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 3779 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 3780 } 3781 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 3782 3783 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3784 H2C_CAT_MAC, 3785 H2C_CL_MAC_WOW, 3786 H2C_FUNC_WOW_CAM_UPD, 0, 1, 3787 H2C_WOW_CAM_UPD_LEN); 3788 3789 ret = rtw89_h2c_tx(rtwdev, skb, false); 3790 if (ret) { 3791 rtw89_err(rtwdev, "failed to send h2c\n"); 3792 goto fail; 3793 } 3794 3795 return 0; 3796 fail: 3797 dev_kfree_skb_any(skb); 3798 3799 return ret; 3800 } 3801 3802 /* Return < 0, if failures happen during waiting for the condition. 3803 * Return 0, when waiting for the condition succeeds. 3804 * Return > 0, if the wait is considered unreachable due to driver/FW design, 3805 * where 1 means during SER. 
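 *
 * Callers differ on the positive (SER) case: the packet offload helpers
 * above check only ret < 0 and carry on, while the scan list offload logs
 * a failure and propagates any non-zero value.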
 */
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond)
{
	int ret;

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
		return 1;

	return rtw89_wait_for_cond(wait, cond);
}

#define H2C_ADD_MCC_LEN 16
int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mcc_add_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for add mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_ADD_MCC_LEN);
	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_ADD_MCC, 0, 0,
			      H2C_ADD_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_START_MCC_LEN 12
int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
			   const struct rtw89_fw_mcc_start_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for start mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_START_MCC_LEN);
	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_START_MCC, 0, 0,
			      H2C_START_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_STOP_MCC_LEN 4
int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
			  bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for stop mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_STOP_MCC_LEN);
	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_STOP_MCC, 0, 0,
			      H2C_STOP_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_DEL_MCC_GROUP_LEN 4
int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
			       bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for del mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
			      H2C_DEL_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_RESET_MCC_GROUP_LEN 4
int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for reset mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
			      H2C_RESET_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_REQ_TSF_LEN 4
int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mcc_tsf_req *req,
			     struct rtw89_mac_mcc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_mac_mcc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int cond;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
			      H2C_MCC_REQ_TSF_LEN);

	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret)
		return ret;

	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}

#define H2C_MCC_MACID_BITMAP_DSC_LEN 4
int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
				  u8 *bitmap)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 map_len;
	u8 h2c_len;

	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
	map_len = RTW89_MAX_MAC_ID_NUM / 8;
	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc macid bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, h2c_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
			      h2c_len);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_SYNC_LEN 4
int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
			  u8 target, u8 offset)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SYNC_LEN);
	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SYNC, 0, 0,
			      H2C_MCC_SYNC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_SET_DURATION_LEN 20
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mcc_duration *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc set duration\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
						       p->start_tsf_low);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
							p->start_tsf_high);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
			      H2C_MCC_SET_DURATION_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
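
/* Illustrative sketch only, compiled out and not part of the driver: one
 * possible way the MCC helpers above could be sequenced to register two
 * roles of a group with firmware and then start it. The request structures
 * are assumed to be pre-filled by the caller, and the function name
 * rtw89_example_mcc_bringup is hypothetical; only functions defined in this
 * file are called.
 */
#if 0
static int rtw89_example_mcc_bringup(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_mcc_add_req *add_x,
				     const struct rtw89_fw_mcc_add_req *add_y,
				     const struct rtw89_fw_mcc_start_req *start)
{
	int ret;

	/* Register both roles of the MCC group with firmware. */
	ret = rtw89_fw_h2c_add_mcc(rtwdev, add_x);
	if (ret)
		return ret;

	ret = rtw89_fw_h2c_add_mcc(rtwdev, add_y);
	if (ret)
		return ret;

	/* Kick off scheduling at the TSF carried in the start request. */
	return rtw89_fw_h2c_start_mcc(rtwdev, start);
}
#endif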